azure-native.awsconnector.DynamoDbTable
Explore with Pulumi AI
A Microsoft.AwsConnector resource. Azure REST API version: 2024-12-01.
Example Usage
DynamoDbTables_CreateOrReplace
// Example (C#): create an azure-native awsconnector DynamoDbTable.
// Every property value below is a generated placeholder from the API schema;
// replace the random-looking strings with real values before deploying.
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using AzureNative = Pulumi.AzureNative;
return await Deployment.RunAsync(() => 
{
    var dynamoDbTable = new AzureNative.AwsConnector.DynamoDbTable("dynamoDbTable", new()
    {
        Location = "fmkjilswdjyisfuwxuj",
        // The resource name must satisfy the RegExp embedded in this placeholder.
        Name = "Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])",
        Properties = new AzureNative.AwsConnector.Inputs.DynamoDBTablePropertiesArgs
        {
            Arn = "gimtbcfiznraniycjyalnwrfstm",
            AwsAccountId = "dejqcxb",
            // Mirror of the AWS-side DynamoDB table definition.
            AwsProperties = new AzureNative.AwsConnector.Inputs.AwsDynamoDBTablePropertiesArgs
            {
                Arn = "qbvqgymuxfzuwybdspdhcuvfouwnet",
                AttributeDefinitions = new[]
                {
                    new AzureNative.AwsConnector.Inputs.AttributeDefinitionArgs
                    {
                        AttributeName = "caryhpofnkqtoc",
                        AttributeType = "bcmjgzaljcemcrswr",
                    },
                },
                BillingMode = "pwxrsjcybdcidejuhvrckvxyxad",
                ContributorInsightsSpecification = new AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationArgs
                {
                    Enabled = true,
                },
                DeletionProtectionEnabled = true,
                GlobalSecondaryIndexes = new[]
                {
                    new AzureNative.AwsConnector.Inputs.GlobalSecondaryIndexArgs
                    {
                        ContributorInsightsSpecification = new AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationArgs
                        {
                            Enabled = true,
                        },
                        IndexName = "uqlzacnvsvayrvirrwwttb",
                        KeySchema = new[]
                        {
                            new AzureNative.AwsConnector.Inputs.KeySchemaArgs
                            {
                                AttributeName = "wisgqkyoouaxivtrtay",
                                KeyType = "kwkqgbxrwnoklpgmoypovxe",
                            },
                        },
                        Projection = new AzureNative.AwsConnector.Inputs.ProjectionArgs
                        {
                            NonKeyAttributes = new[]
                            {
                                "loqmvohtjsscueegam",
                            },
                            ProjectionType = "atbzepkydpgudoaqi",
                        },
                        ProvisionedThroughput = new AzureNative.AwsConnector.Inputs.ProvisionedThroughputArgs
                        {
                            ReadCapacityUnits = 10,
                            WriteCapacityUnits = 28,
                        },
                    },
                },
                ImportSourceSpecification = new AzureNative.AwsConnector.Inputs.ImportSourceSpecificationArgs
                {
                    InputCompressionType = "bjswmnwxleqmcth",
                    InputFormat = "grnhhysgejvbnecrqoynjomz",
                    InputFormatOptions = new AzureNative.AwsConnector.Inputs.InputFormatOptionsArgs
                    {
                        Csv = new AzureNative.AwsConnector.Inputs.CsvArgs
                        {
                            Delimiter = "qzowvvpwwhptthlgvrtnpyjszetrt",
                            HeaderList = new[]
                            {
                                "gminuylhgebpjx",
                            },
                        },
                    },
                    S3BucketSource = new AzureNative.AwsConnector.Inputs.S3BucketSourceArgs
                    {
                        S3Bucket = "exulhkspgmo",
                        S3BucketOwner = "pyawhaxbwqhgarz",
                        S3KeyPrefix = "ogjgqdsvu",
                    },
                },
                KeySchema = new[]
                {
                    new AzureNative.AwsConnector.Inputs.KeySchemaArgs
                    {
                        AttributeName = "wisgqkyoouaxivtrtay",
                        KeyType = "kwkqgbxrwnoklpgmoypovxe",
                    },
                },
                KinesisStreamSpecification = new AzureNative.AwsConnector.Inputs.KinesisStreamSpecificationArgs
                {
                    // Typed enum value (the other examples pass the raw string "MICROSECOND").
                    ApproximateCreationDateTimePrecision = AzureNative.AwsConnector.KinesisStreamSpecificationApproximateCreationDateTimePrecision.MICROSECOND,
                    StreamArn = "qldltl",
                },
                LocalSecondaryIndexes = new[]
                {
                    new AzureNative.AwsConnector.Inputs.LocalSecondaryIndexArgs
                    {
                        IndexName = "gintyosxvkjqpe",
                        KeySchema = new[]
                        {
                            new AzureNative.AwsConnector.Inputs.KeySchemaArgs
                            {
                                AttributeName = "wisgqkyoouaxivtrtay",
                                KeyType = "kwkqgbxrwnoklpgmoypovxe",
                            },
                        },
                        Projection = new AzureNative.AwsConnector.Inputs.ProjectionArgs
                        {
                            NonKeyAttributes = new[]
                            {
                                "loqmvohtjsscueegam",
                            },
                            ProjectionType = "atbzepkydpgudoaqi",
                        },
                    },
                },
                PointInTimeRecoverySpecification = new AzureNative.AwsConnector.Inputs.PointInTimeRecoverySpecificationArgs
                {
                    PointInTimeRecoveryEnabled = true,
                },
                ProvisionedThroughput = new AzureNative.AwsConnector.Inputs.ProvisionedThroughputArgs
                {
                    ReadCapacityUnits = 10,
                    WriteCapacityUnits = 28,
                },
                // NOTE(review): null here, while the Go/TS/Python/YAML examples pass an
                // empty ResourcePolicy object — confirm which form the API expects.
                ResourcePolicy = null,
                SseSpecification = new AzureNative.AwsConnector.Inputs.SSESpecificationArgs
                {
                    KmsMasterKeyId = "rvwuejohzknzrntkvprgxt",
                    SseEnabled = true,
                    SseType = "osjalywculjbrystezvjojxe",
                },
                StreamArn = "xvkrzs",
                StreamSpecification = new AzureNative.AwsConnector.Inputs.StreamSpecificationArgs
                {
                    ResourcePolicy = null,
                    StreamViewType = "wemod",
                },
                TableClass = "tmbfrfbppwhjpm",
                TableName = "mqvlcdboopn",
                Tags = new[]
                {
                    new AzureNative.AwsConnector.Inputs.TagArgs
                    {
                        Key = "txipennfw",
                        Value = "dkgweupnz",
                    },
                },
                TimeToLiveSpecification = new AzureNative.AwsConnector.Inputs.TimeToLiveSpecificationArgs
                {
                    AttributeName = "sxbfejubturdtyusqywguqni",
                    Enabled = true,
                },
            },
            AwsRegion = "rdzrhtbydhmaxzuwe",
            AwsSourceSchema = "sqkkuxwamzevkp",
            AwsTags = 
            {
                { "key3791", "iikafuvbjkvnbogujm" },
            },
            PublicCloudConnectorsResourceId = "nugnoqcknmrrminwvfvloqsporjd",
            PublicCloudResourceName = "lkbwyvnzooydbnembmykhmw",
        },
        ResourceGroupName = "rgdynamoDBTable",
        Tags = 
        {
            { "key2178", "lyeternduvkobwvqhpicnxel" },
        },
    });
});
// Example (Go): create an azure-native awsconnector DynamoDbTable.
// Every property value below is a generated placeholder from the API schema;
// replace the random-looking strings with real values before deploying.
package main
import (
	awsconnector "github.com/pulumi/pulumi-azure-native-sdk/awsconnector/v2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := awsconnector.NewDynamoDbTable(ctx, "dynamoDbTable", &awsconnector.DynamoDbTableArgs{
			Location: pulumi.String("fmkjilswdjyisfuwxuj"),
			// The resource name must satisfy the RegExp embedded in this placeholder.
			Name:     pulumi.String("Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])"),
			Properties: &awsconnector.DynamoDBTablePropertiesArgs{
				Arn:          pulumi.String("gimtbcfiznraniycjyalnwrfstm"),
				AwsAccountId: pulumi.String("dejqcxb"),
				// Mirror of the AWS-side DynamoDB table definition.
				AwsProperties: &awsconnector.AwsDynamoDBTablePropertiesArgs{
					Arn: pulumi.String("qbvqgymuxfzuwybdspdhcuvfouwnet"),
					AttributeDefinitions: awsconnector.AttributeDefinitionArray{
						&awsconnector.AttributeDefinitionArgs{
							AttributeName: pulumi.String("caryhpofnkqtoc"),
							AttributeType: pulumi.String("bcmjgzaljcemcrswr"),
						},
					},
					BillingMode: pulumi.String("pwxrsjcybdcidejuhvrckvxyxad"),
					ContributorInsightsSpecification: &awsconnector.ContributorInsightsSpecificationArgs{
						Enabled: pulumi.Bool(true),
					},
					DeletionProtectionEnabled: pulumi.Bool(true),
					GlobalSecondaryIndexes: awsconnector.GlobalSecondaryIndexArray{
						&awsconnector.GlobalSecondaryIndexArgs{
							ContributorInsightsSpecification: &awsconnector.ContributorInsightsSpecificationArgs{
								Enabled: pulumi.Bool(true),
							},
							IndexName: pulumi.String("uqlzacnvsvayrvirrwwttb"),
							KeySchema: awsconnector.KeySchemaArray{
								&awsconnector.KeySchemaArgs{
									AttributeName: pulumi.String("wisgqkyoouaxivtrtay"),
									KeyType:       pulumi.String("kwkqgbxrwnoklpgmoypovxe"),
								},
							},
							Projection: &awsconnector.ProjectionArgs{
								NonKeyAttributes: pulumi.StringArray{
									pulumi.String("loqmvohtjsscueegam"),
								},
								ProjectionType: pulumi.String("atbzepkydpgudoaqi"),
							},
							ProvisionedThroughput: &awsconnector.ProvisionedThroughputArgs{
								ReadCapacityUnits:  pulumi.Int(10),
								WriteCapacityUnits: pulumi.Int(28),
							},
						},
					},
					ImportSourceSpecification: &awsconnector.ImportSourceSpecificationArgs{
						InputCompressionType: pulumi.String("bjswmnwxleqmcth"),
						InputFormat:          pulumi.String("grnhhysgejvbnecrqoynjomz"),
						InputFormatOptions: &awsconnector.InputFormatOptionsArgs{
							Csv: &awsconnector.CsvArgs{
								Delimiter: pulumi.String("qzowvvpwwhptthlgvrtnpyjszetrt"),
								HeaderList: pulumi.StringArray{
									pulumi.String("gminuylhgebpjx"),
								},
							},
						},
						S3BucketSource: &awsconnector.S3BucketSourceArgs{
							S3Bucket:      pulumi.String("exulhkspgmo"),
							S3BucketOwner: pulumi.String("pyawhaxbwqhgarz"),
							S3KeyPrefix:   pulumi.String("ogjgqdsvu"),
						},
					},
					KeySchema: awsconnector.KeySchemaArray{
						&awsconnector.KeySchemaArgs{
							AttributeName: pulumi.String("wisgqkyoouaxivtrtay"),
							KeyType:       pulumi.String("kwkqgbxrwnoklpgmoypovxe"),
						},
					},
					KinesisStreamSpecification: &awsconnector.KinesisStreamSpecificationArgs{
						// pulumi.String(...) is a type conversion here — NOTE(review): assumes the
						// enum constant is a string-based type in the SDK; confirm it compiles.
						ApproximateCreationDateTimePrecision: pulumi.String(awsconnector.KinesisStreamSpecificationApproximateCreationDateTimePrecisionMICROSECOND),
						StreamArn:                            pulumi.String("qldltl"),
					},
					LocalSecondaryIndexes: awsconnector.LocalSecondaryIndexArray{
						&awsconnector.LocalSecondaryIndexArgs{
							IndexName: pulumi.String("gintyosxvkjqpe"),
							KeySchema: awsconnector.KeySchemaArray{
								&awsconnector.KeySchemaArgs{
									AttributeName: pulumi.String("wisgqkyoouaxivtrtay"),
									KeyType:       pulumi.String("kwkqgbxrwnoklpgmoypovxe"),
								},
							},
							Projection: &awsconnector.ProjectionArgs{
								NonKeyAttributes: pulumi.StringArray{
									pulumi.String("loqmvohtjsscueegam"),
								},
								ProjectionType: pulumi.String("atbzepkydpgudoaqi"),
							},
						},
					},
					PointInTimeRecoverySpecification: &awsconnector.PointInTimeRecoverySpecificationArgs{
						PointInTimeRecoveryEnabled: pulumi.Bool(true),
					},
					ProvisionedThroughput: &awsconnector.ProvisionedThroughputArgs{
						ReadCapacityUnits:  pulumi.Int(10),
						WriteCapacityUnits: pulumi.Int(28),
					},
					// Empty resource policy object (the C# example passes null instead).
					ResourcePolicy: &awsconnector.ResourcePolicyArgs{},
					SseSpecification: &awsconnector.SSESpecificationArgs{
						KmsMasterKeyId: pulumi.String("rvwuejohzknzrntkvprgxt"),
						SseEnabled:     pulumi.Bool(true),
						SseType:        pulumi.String("osjalywculjbrystezvjojxe"),
					},
					StreamArn: pulumi.String("xvkrzs"),
					StreamSpecification: &awsconnector.StreamSpecificationArgs{
						ResourcePolicy: &awsconnector.ResourcePolicyArgs{},
						StreamViewType: pulumi.String("wemod"),
					},
					TableClass: pulumi.String("tmbfrfbppwhjpm"),
					TableName:  pulumi.String("mqvlcdboopn"),
					Tags: awsconnector.TagArray{
						&awsconnector.TagArgs{
							Key:   pulumi.String("txipennfw"),
							Value: pulumi.String("dkgweupnz"),
						},
					},
					TimeToLiveSpecification: &awsconnector.TimeToLiveSpecificationArgs{
						AttributeName: pulumi.String("sxbfejubturdtyusqywguqni"),
						Enabled:       pulumi.Bool(true),
					},
				},
				AwsRegion:       pulumi.String("rdzrhtbydhmaxzuwe"),
				AwsSourceSchema: pulumi.String("sqkkuxwamzevkp"),
				AwsTags: pulumi.StringMap{
					"key3791": pulumi.String("iikafuvbjkvnbogujm"),
				},
				PublicCloudConnectorsResourceId: pulumi.String("nugnoqcknmrrminwvfvloqsporjd"),
				PublicCloudResourceName:         pulumi.String("lkbwyvnzooydbnembmykhmw"),
			},
			ResourceGroupName: pulumi.String("rgdynamoDBTable"),
			Tags: pulumi.StringMap{
				"key2178": pulumi.String("lyeternduvkobwvqhpicnxel"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example (Java): create an azure-native awsconnector DynamoDbTable.
// Every property value below is a generated placeholder from the API schema;
// replace the random-looking strings with real values before deploying.
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azurenative.awsconnector.DynamoDbTable;
import com.pulumi.azurenative.awsconnector.DynamoDbTableArgs;
import com.pulumi.azurenative.awsconnector.inputs.DynamoDBTablePropertiesArgs;
import com.pulumi.azurenative.awsconnector.inputs.AwsDynamoDBTablePropertiesArgs;
// Added: these input types are used below but were previously not imported,
// so the example did not compile.
import com.pulumi.azurenative.awsconnector.inputs.AttributeDefinitionArgs;
import com.pulumi.azurenative.awsconnector.inputs.ContributorInsightsSpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.GlobalSecondaryIndexArgs;
import com.pulumi.azurenative.awsconnector.inputs.ImportSourceSpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.InputFormatOptionsArgs;
import com.pulumi.azurenative.awsconnector.inputs.CsvArgs;
import com.pulumi.azurenative.awsconnector.inputs.S3BucketSourceArgs;
import com.pulumi.azurenative.awsconnector.inputs.KeySchemaArgs;
import com.pulumi.azurenative.awsconnector.inputs.KinesisStreamSpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.LocalSecondaryIndexArgs;
import com.pulumi.azurenative.awsconnector.inputs.PointInTimeRecoverySpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.ProjectionArgs;
import com.pulumi.azurenative.awsconnector.inputs.ProvisionedThroughputArgs;
import com.pulumi.azurenative.awsconnector.inputs.ResourcePolicyArgs;
import com.pulumi.azurenative.awsconnector.inputs.SSESpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.StreamSpecificationArgs;
import com.pulumi.azurenative.awsconnector.inputs.TagArgs;
import com.pulumi.azurenative.awsconnector.inputs.TimeToLiveSpecificationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var dynamoDbTable = new DynamoDbTable("dynamoDbTable", DynamoDbTableArgs.builder()
            .location("fmkjilswdjyisfuwxuj")
            // The resource name must satisfy the RegExp embedded in this placeholder.
            .name("Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])")
            .properties(DynamoDBTablePropertiesArgs.builder()
                .arn("gimtbcfiznraniycjyalnwrfstm")
                .awsAccountId("dejqcxb")
                // Mirror of the AWS-side DynamoDB table definition.
                .awsProperties(AwsDynamoDBTablePropertiesArgs.builder()
                    .arn("qbvqgymuxfzuwybdspdhcuvfouwnet")
                    .attributeDefinitions(AttributeDefinitionArgs.builder()
                        .attributeName("caryhpofnkqtoc")
                        .attributeType("bcmjgzaljcemcrswr")
                        .build())
                    .billingMode("pwxrsjcybdcidejuhvrckvxyxad")
                    .contributorInsightsSpecification(ContributorInsightsSpecificationArgs.builder()
                        .enabled(true)
                        .build())
                    .deletionProtectionEnabled(true)
                    .globalSecondaryIndexes(GlobalSecondaryIndexArgs.builder()
                        .contributorInsightsSpecification(ContributorInsightsSpecificationArgs.builder()
                            .enabled(true)
                            .build())
                        .indexName("uqlzacnvsvayrvirrwwttb")
                        .keySchema(KeySchemaArgs.builder()
                            .attributeName("wisgqkyoouaxivtrtay")
                            .keyType("kwkqgbxrwnoklpgmoypovxe")
                            .build())
                        .projection(ProjectionArgs.builder()
                            .nonKeyAttributes("loqmvohtjsscueegam")
                            .projectionType("atbzepkydpgudoaqi")
                            .build())
                        .provisionedThroughput(ProvisionedThroughputArgs.builder()
                            .readCapacityUnits(10)
                            .writeCapacityUnits(28)
                            .build())
                        .build())
                    .importSourceSpecification(ImportSourceSpecificationArgs.builder()
                        .inputCompressionType("bjswmnwxleqmcth")
                        .inputFormat("grnhhysgejvbnecrqoynjomz")
                        .inputFormatOptions(InputFormatOptionsArgs.builder()
                            .csv(CsvArgs.builder()
                                .delimiter("qzowvvpwwhptthlgvrtnpyjszetrt")
                                .headerList("gminuylhgebpjx")
                                .build())
                            .build())
                        .s3BucketSource(S3BucketSourceArgs.builder()
                            .s3Bucket("exulhkspgmo")
                            .s3BucketOwner("pyawhaxbwqhgarz")
                            .s3KeyPrefix("ogjgqdsvu")
                            .build())
                        .build())
                    .keySchema(KeySchemaArgs.builder()
                        .attributeName("wisgqkyoouaxivtrtay")
                        .keyType("kwkqgbxrwnoklpgmoypovxe")
                        .build())
                    .kinesisStreamSpecification(KinesisStreamSpecificationArgs.builder()
                        .approximateCreationDateTimePrecision("MICROSECOND")
                        .streamArn("qldltl")
                        .build())
                    .localSecondaryIndexes(LocalSecondaryIndexArgs.builder()
                        .indexName("gintyosxvkjqpe")
                        .keySchema(KeySchemaArgs.builder()
                            .attributeName("wisgqkyoouaxivtrtay")
                            .keyType("kwkqgbxrwnoklpgmoypovxe")
                            .build())
                        .projection(ProjectionArgs.builder()
                            .nonKeyAttributes("loqmvohtjsscueegam")
                            .projectionType("atbzepkydpgudoaqi")
                            .build())
                        .build())
                    .pointInTimeRecoverySpecification(PointInTimeRecoverySpecificationArgs.builder()
                        .pointInTimeRecoveryEnabled(true)
                        .build())
                    .provisionedThroughput(ProvisionedThroughputArgs.builder()
                        .readCapacityUnits(10)
                        .writeCapacityUnits(28)
                        .build())
                    // Fixed: .resourcePolicy() took no argument and did not compile;
                    // pass an empty policy object as in the other language examples.
                    .resourcePolicy(ResourcePolicyArgs.builder().build())
                    .sseSpecification(SSESpecificationArgs.builder()
                        .kmsMasterKeyId("rvwuejohzknzrntkvprgxt")
                        .sseEnabled(true)
                        .sseType("osjalywculjbrystezvjojxe")
                        .build())
                    .streamArn("xvkrzs")
                    .streamSpecification(StreamSpecificationArgs.builder()
                        .resourcePolicy(ResourcePolicyArgs.builder().build())
                        .streamViewType("wemod")
                        .build())
                    .tableClass("tmbfrfbppwhjpm")
                    .tableName("mqvlcdboopn")
                    .tags(TagArgs.builder()
                        .key("txipennfw")
                        .value("dkgweupnz")
                        .build())
                    .timeToLiveSpecification(TimeToLiveSpecificationArgs.builder()
                        .attributeName("sxbfejubturdtyusqywguqni")
                        .enabled(true)
                        .build())
                    .build())
                .awsRegion("rdzrhtbydhmaxzuwe")
                .awsSourceSchema("sqkkuxwamzevkp")
                .awsTags(Map.of("key3791", "iikafuvbjkvnbogujm"))
                .publicCloudConnectorsResourceId("nugnoqcknmrrminwvfvloqsporjd")
                .publicCloudResourceName("lkbwyvnzooydbnembmykhmw")
                .build())
            .resourceGroupName("rgdynamoDBTable")
            .tags(Map.of("key2178", "lyeternduvkobwvqhpicnxel"))
            .build());
    }
}
// Example (TypeScript): create an azure-native awsconnector DynamoDbTable.
// Every property value below is a generated placeholder from the API schema;
// replace the random-looking strings with real values before deploying.
import * as pulumi from "@pulumi/pulumi";
import * as azure_native from "@pulumi/azure-native";
const dynamoDbTable = new azure_native.awsconnector.DynamoDbTable("dynamoDbTable", {
    location: "fmkjilswdjyisfuwxuj",
    // The resource name must satisfy the RegExp embedded in this placeholder.
    name: "Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])",
    properties: {
        arn: "gimtbcfiznraniycjyalnwrfstm",
        awsAccountId: "dejqcxb",
        // Mirror of the AWS-side DynamoDB table definition.
        awsProperties: {
            arn: "qbvqgymuxfzuwybdspdhcuvfouwnet",
            attributeDefinitions: [{
                attributeName: "caryhpofnkqtoc",
                attributeType: "bcmjgzaljcemcrswr",
            }],
            billingMode: "pwxrsjcybdcidejuhvrckvxyxad",
            contributorInsightsSpecification: {
                enabled: true,
            },
            deletionProtectionEnabled: true,
            globalSecondaryIndexes: [{
                contributorInsightsSpecification: {
                    enabled: true,
                },
                indexName: "uqlzacnvsvayrvirrwwttb",
                keySchema: [{
                    attributeName: "wisgqkyoouaxivtrtay",
                    keyType: "kwkqgbxrwnoklpgmoypovxe",
                }],
                projection: {
                    nonKeyAttributes: ["loqmvohtjsscueegam"],
                    projectionType: "atbzepkydpgudoaqi",
                },
                provisionedThroughput: {
                    readCapacityUnits: 10,
                    writeCapacityUnits: 28,
                },
            }],
            importSourceSpecification: {
                inputCompressionType: "bjswmnwxleqmcth",
                inputFormat: "grnhhysgejvbnecrqoynjomz",
                inputFormatOptions: {
                    csv: {
                        delimiter: "qzowvvpwwhptthlgvrtnpyjszetrt",
                        headerList: ["gminuylhgebpjx"],
                    },
                },
                s3BucketSource: {
                    s3Bucket: "exulhkspgmo",
                    s3BucketOwner: "pyawhaxbwqhgarz",
                    s3KeyPrefix: "ogjgqdsvu",
                },
            },
            keySchema: [{
                attributeName: "wisgqkyoouaxivtrtay",
                keyType: "kwkqgbxrwnoklpgmoypovxe",
            }],
            kinesisStreamSpecification: {
                // Typed enum value (equivalent to the raw string "MICROSECOND").
                approximateCreationDateTimePrecision: azure_native.awsconnector.KinesisStreamSpecificationApproximateCreationDateTimePrecision.MICROSECOND,
                streamArn: "qldltl",
            },
            localSecondaryIndexes: [{
                indexName: "gintyosxvkjqpe",
                keySchema: [{
                    attributeName: "wisgqkyoouaxivtrtay",
                    keyType: "kwkqgbxrwnoklpgmoypovxe",
                }],
                projection: {
                    nonKeyAttributes: ["loqmvohtjsscueegam"],
                    projectionType: "atbzepkydpgudoaqi",
                },
            }],
            pointInTimeRecoverySpecification: {
                pointInTimeRecoveryEnabled: true,
            },
            provisionedThroughput: {
                readCapacityUnits: 10,
                writeCapacityUnits: 28,
            },
            // Empty resource policy object (the C# example passes null instead).
            resourcePolicy: {},
            sseSpecification: {
                kmsMasterKeyId: "rvwuejohzknzrntkvprgxt",
                sseEnabled: true,
                sseType: "osjalywculjbrystezvjojxe",
            },
            streamArn: "xvkrzs",
            streamSpecification: {
                resourcePolicy: {},
                streamViewType: "wemod",
            },
            tableClass: "tmbfrfbppwhjpm",
            tableName: "mqvlcdboopn",
            tags: [{
                key: "txipennfw",
                value: "dkgweupnz",
            }],
            timeToLiveSpecification: {
                attributeName: "sxbfejubturdtyusqywguqni",
                enabled: true,
            },
        },
        awsRegion: "rdzrhtbydhmaxzuwe",
        awsSourceSchema: "sqkkuxwamzevkp",
        awsTags: {
            key3791: "iikafuvbjkvnbogujm",
        },
        publicCloudConnectorsResourceId: "nugnoqcknmrrminwvfvloqsporjd",
        publicCloudResourceName: "lkbwyvnzooydbnembmykhmw",
    },
    resourceGroupName: "rgdynamoDBTable",
    tags: {
        key2178: "lyeternduvkobwvqhpicnxel",
    },
});
# Example (Python): create an azure-native awsconnector DynamoDbTable.
# Every property value below is a generated placeholder from the API schema;
# replace the random-looking strings with real values before deploying.
import pulumi
import pulumi_azure_native as azure_native
dynamo_db_table = azure_native.awsconnector.DynamoDbTable("dynamoDbTable",
    location="fmkjilswdjyisfuwxuj",
    # The resource name must satisfy the RegExp embedded in this placeholder.
    name="Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])",
    properties={
        "arn": "gimtbcfiznraniycjyalnwrfstm",
        "aws_account_id": "dejqcxb",
        # Mirror of the AWS-side DynamoDB table definition.
        "aws_properties": {
            "arn": "qbvqgymuxfzuwybdspdhcuvfouwnet",
            "attribute_definitions": [{
                "attribute_name": "caryhpofnkqtoc",
                "attribute_type": "bcmjgzaljcemcrswr",
            }],
            "billing_mode": "pwxrsjcybdcidejuhvrckvxyxad",
            "contributor_insights_specification": {
                "enabled": True,
            },
            "deletion_protection_enabled": True,
            "global_secondary_indexes": [{
                "contributor_insights_specification": {
                    "enabled": True,
                },
                "index_name": "uqlzacnvsvayrvirrwwttb",
                "key_schema": [{
                    "attribute_name": "wisgqkyoouaxivtrtay",
                    "key_type": "kwkqgbxrwnoklpgmoypovxe",
                }],
                "projection": {
                    "non_key_attributes": ["loqmvohtjsscueegam"],
                    "projection_type": "atbzepkydpgudoaqi",
                },
                "provisioned_throughput": {
                    "read_capacity_units": 10,
                    "write_capacity_units": 28,
                },
            }],
            "import_source_specification": {
                "input_compression_type": "bjswmnwxleqmcth",
                "input_format": "grnhhysgejvbnecrqoynjomz",
                "input_format_options": {
                    "csv": {
                        "delimiter": "qzowvvpwwhptthlgvrtnpyjszetrt",
                        "header_list": ["gminuylhgebpjx"],
                    },
                },
                "s3_bucket_source": {
                    "s3_bucket": "exulhkspgmo",
                    "s3_bucket_owner": "pyawhaxbwqhgarz",
                    "s3_key_prefix": "ogjgqdsvu",
                },
            },
            "key_schema": [{
                "attribute_name": "wisgqkyoouaxivtrtay",
                "key_type": "kwkqgbxrwnoklpgmoypovxe",
            }],
            "kinesis_stream_specification": {
                # Typed enum value (equivalent to the raw string "MICROSECOND").
                "approximate_creation_date_time_precision": azure_native.awsconnector.KinesisStreamSpecificationApproximateCreationDateTimePrecision.MICROSECOND,
                "stream_arn": "qldltl",
            },
            "local_secondary_indexes": [{
                "index_name": "gintyosxvkjqpe",
                "key_schema": [{
                    "attribute_name": "wisgqkyoouaxivtrtay",
                    "key_type": "kwkqgbxrwnoklpgmoypovxe",
                }],
                "projection": {
                    "non_key_attributes": ["loqmvohtjsscueegam"],
                    "projection_type": "atbzepkydpgudoaqi",
                },
            }],
            "point_in_time_recovery_specification": {
                "point_in_time_recovery_enabled": True,
            },
            "provisioned_throughput": {
                "read_capacity_units": 10,
                "write_capacity_units": 28,
            },
            # Empty resource policy object (the C# example passes null instead).
            "resource_policy": {},
            "sse_specification": {
                "kms_master_key_id": "rvwuejohzknzrntkvprgxt",
                "sse_enabled": True,
                "sse_type": "osjalywculjbrystezvjojxe",
            },
            "stream_arn": "xvkrzs",
            "stream_specification": {
                "resource_policy": {},
                "stream_view_type": "wemod",
            },
            "table_class": "tmbfrfbppwhjpm",
            "table_name": "mqvlcdboopn",
            "tags": [{
                "key": "txipennfw",
                "value": "dkgweupnz",
            }],
            "time_to_live_specification": {
                "attribute_name": "sxbfejubturdtyusqywguqni",
                "enabled": True,
            },
        },
        "aws_region": "rdzrhtbydhmaxzuwe",
        "aws_source_schema": "sqkkuxwamzevkp",
        "aws_tags": {
            "key3791": "iikafuvbjkvnbogujm",
        },
        "public_cloud_connectors_resource_id": "nugnoqcknmrrminwvfvloqsporjd",
        "public_cloud_resource_name": "lkbwyvnzooydbnembmykhmw",
    },
    resource_group_name="rgdynamoDBTable",
    tags={
        "key2178": "lyeternduvkobwvqhpicnxel",
    })
# Example (YAML): create an azure-native awsconnector DynamoDbTable.
# Every property value below is a generated placeholder from the API schema;
# replace the random-looking strings with real values before deploying.
resources:
  dynamoDbTable:
    type: azure-native:awsconnector:DynamoDbTable
    properties:
      location: fmkjilswdjyisfuwxuj
      # The resource name must satisfy the RegExp embedded in this placeholder.
      name: Replace this value with a string matching RegExp ^(z=.{0,259}[^zs.]$)(z!.*[zzzzzzzz])
      properties:
        arn: gimtbcfiznraniycjyalnwrfstm
        awsAccountId: dejqcxb
        # Mirror of the AWS-side DynamoDB table definition.
        awsProperties:
          arn: qbvqgymuxfzuwybdspdhcuvfouwnet
          attributeDefinitions:
            - attributeName: caryhpofnkqtoc
              attributeType: bcmjgzaljcemcrswr
          billingMode: pwxrsjcybdcidejuhvrckvxyxad
          contributorInsightsSpecification:
            enabled: true
          deletionProtectionEnabled: true
          globalSecondaryIndexes:
            - contributorInsightsSpecification:
                enabled: true
              indexName: uqlzacnvsvayrvirrwwttb
              keySchema:
                - attributeName: wisgqkyoouaxivtrtay
                  keyType: kwkqgbxrwnoklpgmoypovxe
              projection:
                nonKeyAttributes:
                  - loqmvohtjsscueegam
                projectionType: atbzepkydpgudoaqi
              provisionedThroughput:
                readCapacityUnits: 10
                writeCapacityUnits: 28
          importSourceSpecification:
            inputCompressionType: bjswmnwxleqmcth
            inputFormat: grnhhysgejvbnecrqoynjomz
            inputFormatOptions:
              csv:
                delimiter: qzowvvpwwhptthlgvrtnpyjszetrt
                headerList:
                  - gminuylhgebpjx
            s3BucketSource:
              s3Bucket: exulhkspgmo
              s3BucketOwner: pyawhaxbwqhgarz
              s3KeyPrefix: ogjgqdsvu
          keySchema:
            - attributeName: wisgqkyoouaxivtrtay
              keyType: kwkqgbxrwnoklpgmoypovxe
          kinesisStreamSpecification:
            approximateCreationDateTimePrecision: MICROSECOND
            streamArn: qldltl
          localSecondaryIndexes:
            - indexName: gintyosxvkjqpe
              keySchema:
                - attributeName: wisgqkyoouaxivtrtay
                  keyType: kwkqgbxrwnoklpgmoypovxe
              projection:
                nonKeyAttributes:
                  - loqmvohtjsscueegam
                projectionType: atbzepkydpgudoaqi
          pointInTimeRecoverySpecification:
            pointInTimeRecoveryEnabled: true
          provisionedThroughput:
            readCapacityUnits: 10
            writeCapacityUnits: 28
          # Empty resource policy object (the C# example passes null instead).
          resourcePolicy: {}
          sseSpecification:
            kmsMasterKeyId: rvwuejohzknzrntkvprgxt
            sseEnabled: true
            sseType: osjalywculjbrystezvjojxe
          streamArn: xvkrzs
          streamSpecification:
            resourcePolicy: {}
            streamViewType: wemod
          tableClass: tmbfrfbppwhjpm
          tableName: mqvlcdboopn
          tags:
            - key: txipennfw
              value: dkgweupnz
          timeToLiveSpecification:
            attributeName: sxbfejubturdtyusqywguqni
            enabled: true
        awsRegion: rdzrhtbydhmaxzuwe
        awsSourceSchema: sqkkuxwamzevkp
        awsTags:
          key3791: iikafuvbjkvnbogujm
        publicCloudConnectorsResourceId: nugnoqcknmrrminwvfvloqsporjd
        publicCloudResourceName: lkbwyvnzooydbnembmykhmw
      resourceGroupName: rgdynamoDBTable
      tags:
        key2178: lyeternduvkobwvqhpicnxel
Create DynamoDbTable Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new DynamoDbTable(name: string, args: DynamoDbTableArgs, opts?: CustomResourceOptions);
@overload
def DynamoDbTable(resource_name: str,
                  args: DynamoDbTableArgs,
                  opts: Optional[ResourceOptions] = None)
@overload
def DynamoDbTable(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  resource_group_name: Optional[str] = None,
                  location: Optional[str] = None,
                  name: Optional[str] = None,
                  properties: Optional[DynamoDBTablePropertiesArgs] = None,
                  tags: Optional[Mapping[str, str]] = None)
func NewDynamoDbTable(ctx *Context, name string, args DynamoDbTableArgs, opts ...ResourceOption) (*DynamoDbTable, error)
public DynamoDbTable(string name, DynamoDbTableArgs args, CustomResourceOptions? opts = null)
public DynamoDbTable(String name, DynamoDbTableArgs args)
public DynamoDbTable(String name, DynamoDbTableArgs args, CustomResourceOptions options)
type: azure-native:awsconnector:DynamoDbTable
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args DynamoDbTableArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args DynamoDbTableArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args DynamoDbTableArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args DynamoDbTableArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args DynamoDbTableArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var dynamoDbTableResource = new AzureNative.AwsConnector.DynamoDbTable("dynamoDbTableResource", new()
{
    ResourceGroupName = "string",
    Location = "string",
    Name = "string",
    Properties = new AzureNative.AwsConnector.Inputs.DynamoDBTablePropertiesArgs
    {
        Arn = "string",
        AwsAccountId = "string",
        AwsProperties = new AzureNative.AwsConnector.Inputs.AwsDynamoDBTablePropertiesArgs
        {
            Arn = "string",
            AttributeDefinitions = new[]
            {
                new AzureNative.AwsConnector.Inputs.AttributeDefinitionArgs
                {
                    AttributeName = "string",
                    AttributeType = "string",
                },
            },
            BillingMode = "string",
            ContributorInsightsSpecification = new AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationArgs
            {
                Enabled = false,
            },
            DeletionProtectionEnabled = false,
            GlobalSecondaryIndexes = new[]
            {
                new AzureNative.AwsConnector.Inputs.GlobalSecondaryIndexArgs
                {
                    ContributorInsightsSpecification = new AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationArgs
                    {
                        Enabled = false,
                    },
                    IndexName = "string",
                    KeySchema = new[]
                    {
                        new AzureNative.AwsConnector.Inputs.KeySchemaArgs
                        {
                            AttributeName = "string",
                            KeyType = "string",
                        },
                    },
                    Projection = new AzureNative.AwsConnector.Inputs.ProjectionArgs
                    {
                        NonKeyAttributes = new[]
                        {
                            "string",
                        },
                        ProjectionType = "string",
                    },
                    ProvisionedThroughput = new AzureNative.AwsConnector.Inputs.ProvisionedThroughputArgs
                    {
                        ReadCapacityUnits = 0,
                        WriteCapacityUnits = 0,
                    },
                },
            },
            ImportSourceSpecification = new AzureNative.AwsConnector.Inputs.ImportSourceSpecificationArgs
            {
                InputCompressionType = "string",
                InputFormat = "string",
                InputFormatOptions = new AzureNative.AwsConnector.Inputs.InputFormatOptionsArgs
                {
                    Csv = new AzureNative.AwsConnector.Inputs.CsvArgs
                    {
                        Delimiter = "string",
                        HeaderList = new[]
                        {
                            "string",
                        },
                    },
                },
                S3BucketSource = new AzureNative.AwsConnector.Inputs.S3BucketSourceArgs
                {
                    S3Bucket = "string",
                    S3BucketOwner = "string",
                    S3KeyPrefix = "string",
                },
            },
            KeySchema = new[]
            {
                new AzureNative.AwsConnector.Inputs.KeySchemaArgs
                {
                    AttributeName = "string",
                    KeyType = "string",
                },
            },
            KinesisStreamSpecification = new AzureNative.AwsConnector.Inputs.KinesisStreamSpecificationArgs
            {
                ApproximateCreationDateTimePrecision = "string",
                StreamArn = "string",
            },
            LocalSecondaryIndexes = new[]
            {
                new AzureNative.AwsConnector.Inputs.LocalSecondaryIndexArgs
                {
                    IndexName = "string",
                    KeySchema = new[]
                    {
                        new AzureNative.AwsConnector.Inputs.KeySchemaArgs
                        {
                            AttributeName = "string",
                            KeyType = "string",
                        },
                    },
                    Projection = new AzureNative.AwsConnector.Inputs.ProjectionArgs
                    {
                        NonKeyAttributes = new[]
                        {
                            "string",
                        },
                        ProjectionType = "string",
                    },
                },
            },
            PointInTimeRecoverySpecification = new AzureNative.AwsConnector.Inputs.PointInTimeRecoverySpecificationArgs
            {
                PointInTimeRecoveryEnabled = false,
            },
            ProvisionedThroughput = new AzureNative.AwsConnector.Inputs.ProvisionedThroughputArgs
            {
                ReadCapacityUnits = 0,
                WriteCapacityUnits = 0,
            },
            ResourcePolicy = new AzureNative.AwsConnector.Inputs.ResourcePolicyArgs
            {
                PolicyDocument = "any",
            },
            SseSpecification = new AzureNative.AwsConnector.Inputs.SSESpecificationArgs
            {
                KmsMasterKeyId = "string",
                SseEnabled = false,
                SseType = "string",
            },
            StreamArn = "string",
            StreamSpecification = new AzureNative.AwsConnector.Inputs.StreamSpecificationArgs
            {
                ResourcePolicy = new AzureNative.AwsConnector.Inputs.ResourcePolicyArgs
                {
                    PolicyDocument = "any",
                },
                StreamViewType = "string",
            },
            TableClass = "string",
            TableName = "string",
            Tags = new[]
            {
                new AzureNative.AwsConnector.Inputs.TagArgs
                {
                    Key = "string",
                    Value = "string",
                },
            },
            TimeToLiveSpecification = new AzureNative.AwsConnector.Inputs.TimeToLiveSpecificationArgs
            {
                AttributeName = "string",
                Enabled = false,
            },
        },
        AwsRegion = "string",
        AwsSourceSchema = "string",
        AwsTags = 
        {
            { "string", "string" },
        },
        PublicCloudConnectorsResourceId = "string",
        PublicCloudResourceName = "string",
    },
    Tags = 
    {
        { "string", "string" },
    },
});
example, err := awsconnector.NewDynamoDbTable(ctx, "dynamoDbTableResource", &awsconnector.DynamoDbTableArgs{
	ResourceGroupName: pulumi.String("string"),
	Location:          pulumi.String("string"),
	Name:              pulumi.String("string"),
	Properties: &awsconnector.DynamoDBTablePropertiesArgs{
		Arn:          pulumi.String("string"),
		AwsAccountId: pulumi.String("string"),
		AwsProperties: &awsconnector.AwsDynamoDBTablePropertiesArgs{
			Arn: pulumi.String("string"),
			AttributeDefinitions: awsconnector.AttributeDefinitionArray{
				&awsconnector.AttributeDefinitionArgs{
					AttributeName: pulumi.String("string"),
					AttributeType: pulumi.String("string"),
				},
			},
			BillingMode: pulumi.String("string"),
			ContributorInsightsSpecification: &awsconnector.ContributorInsightsSpecificationArgs{
				Enabled: pulumi.Bool(false),
			},
			DeletionProtectionEnabled: pulumi.Bool(false),
			GlobalSecondaryIndexes: awsconnector.GlobalSecondaryIndexArray{
				&awsconnector.GlobalSecondaryIndexArgs{
					ContributorInsightsSpecification: &awsconnector.ContributorInsightsSpecificationArgs{
						Enabled: pulumi.Bool(false),
					},
					IndexName: pulumi.String("string"),
					KeySchema: awsconnector.KeySchemaArray{
						&awsconnector.KeySchemaArgs{
							AttributeName: pulumi.String("string"),
							KeyType:       pulumi.String("string"),
						},
					},
					Projection: &awsconnector.ProjectionArgs{
						NonKeyAttributes: pulumi.StringArray{
							pulumi.String("string"),
						},
						ProjectionType: pulumi.String("string"),
					},
					ProvisionedThroughput: &awsconnector.ProvisionedThroughputArgs{
						ReadCapacityUnits:  pulumi.Int(0),
						WriteCapacityUnits: pulumi.Int(0),
					},
				},
			},
			ImportSourceSpecification: &awsconnector.ImportSourceSpecificationArgs{
				InputCompressionType: pulumi.String("string"),
				InputFormat:          pulumi.String("string"),
				InputFormatOptions: &awsconnector.InputFormatOptionsArgs{
					Csv: &awsconnector.CsvArgs{
						Delimiter: pulumi.String("string"),
						HeaderList: pulumi.StringArray{
							pulumi.String("string"),
						},
					},
				},
				S3BucketSource: &awsconnector.S3BucketSourceArgs{
					S3Bucket:      pulumi.String("string"),
					S3BucketOwner: pulumi.String("string"),
					S3KeyPrefix:   pulumi.String("string"),
				},
			},
			KeySchema: awsconnector.KeySchemaArray{
				&awsconnector.KeySchemaArgs{
					AttributeName: pulumi.String("string"),
					KeyType:       pulumi.String("string"),
				},
			},
			KinesisStreamSpecification: &awsconnector.KinesisStreamSpecificationArgs{
				ApproximateCreationDateTimePrecision: pulumi.String("string"),
				StreamArn:                            pulumi.String("string"),
			},
			LocalSecondaryIndexes: awsconnector.LocalSecondaryIndexArray{
				&awsconnector.LocalSecondaryIndexArgs{
					IndexName: pulumi.String("string"),
					KeySchema: awsconnector.KeySchemaArray{
						&awsconnector.KeySchemaArgs{
							AttributeName: pulumi.String("string"),
							KeyType:       pulumi.String("string"),
						},
					},
					Projection: &awsconnector.ProjectionArgs{
						NonKeyAttributes: pulumi.StringArray{
							pulumi.String("string"),
						},
						ProjectionType: pulumi.String("string"),
					},
				},
			},
			PointInTimeRecoverySpecification: &awsconnector.PointInTimeRecoverySpecificationArgs{
				PointInTimeRecoveryEnabled: pulumi.Bool(false),
			},
			ProvisionedThroughput: &awsconnector.ProvisionedThroughputArgs{
				ReadCapacityUnits:  pulumi.Int(0),
				WriteCapacityUnits: pulumi.Int(0),
			},
			ResourcePolicy: &awsconnector.ResourcePolicyArgs{
				PolicyDocument: pulumi.Any("any"),
			},
			SseSpecification: &awsconnector.SSESpecificationArgs{
				KmsMasterKeyId: pulumi.String("string"),
				SseEnabled:     pulumi.Bool(false),
				SseType:        pulumi.String("string"),
			},
			StreamArn: pulumi.String("string"),
			StreamSpecification: &awsconnector.StreamSpecificationArgs{
				ResourcePolicy: &awsconnector.ResourcePolicyArgs{
					PolicyDocument: pulumi.Any("any"),
				},
				StreamViewType: pulumi.String("string"),
			},
			TableClass: pulumi.String("string"),
			TableName:  pulumi.String("string"),
			Tags: awsconnector.TagArray{
				&awsconnector.TagArgs{
					Key:   pulumi.String("string"),
					Value: pulumi.String("string"),
				},
			},
			TimeToLiveSpecification: &awsconnector.TimeToLiveSpecificationArgs{
				AttributeName: pulumi.String("string"),
				Enabled:       pulumi.Bool(false),
			},
		},
		AwsRegion:       pulumi.String("string"),
		AwsSourceSchema: pulumi.String("string"),
		AwsTags: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		PublicCloudConnectorsResourceId: pulumi.String("string"),
		PublicCloudResourceName:         pulumi.String("string"),
	},
	Tags: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
})
var dynamoDbTableResource = new DynamoDbTable("dynamoDbTableResource", DynamoDbTableArgs.builder()
    .resourceGroupName("string")
    .location("string")
    .name("string")
    .properties(DynamoDBTablePropertiesArgs.builder()
        .arn("string")
        .awsAccountId("string")
        .awsProperties(AwsDynamoDBTablePropertiesArgs.builder()
            .arn("string")
            .attributeDefinitions(AttributeDefinitionArgs.builder()
                .attributeName("string")
                .attributeType("string")
                .build())
            .billingMode("string")
            .contributorInsightsSpecification(ContributorInsightsSpecificationArgs.builder()
                .enabled(false)
                .build())
            .deletionProtectionEnabled(false)
            .globalSecondaryIndexes(GlobalSecondaryIndexArgs.builder()
                .contributorInsightsSpecification(ContributorInsightsSpecificationArgs.builder()
                    .enabled(false)
                    .build())
                .indexName("string")
                .keySchema(KeySchemaArgs.builder()
                    .attributeName("string")
                    .keyType("string")
                    .build())
                .projection(ProjectionArgs.builder()
                    .nonKeyAttributes("string")
                    .projectionType("string")
                    .build())
                .provisionedThroughput(ProvisionedThroughputArgs.builder()
                    .readCapacityUnits(0)
                    .writeCapacityUnits(0)
                    .build())
                .build())
            .importSourceSpecification(ImportSourceSpecificationArgs.builder()
                .inputCompressionType("string")
                .inputFormat("string")
                .inputFormatOptions(InputFormatOptionsArgs.builder()
                    .csv(CsvArgs.builder()
                        .delimiter("string")
                        .headerList("string")
                        .build())
                    .build())
                .s3BucketSource(S3BucketSourceArgs.builder()
                    .s3Bucket("string")
                    .s3BucketOwner("string")
                    .s3KeyPrefix("string")
                    .build())
                .build())
            .keySchema(KeySchemaArgs.builder()
                .attributeName("string")
                .keyType("string")
                .build())
            .kinesisStreamSpecification(KinesisStreamSpecificationArgs.builder()
                .approximateCreationDateTimePrecision("string")
                .streamArn("string")
                .build())
            .localSecondaryIndexes(LocalSecondaryIndexArgs.builder()
                .indexName("string")
                .keySchema(KeySchemaArgs.builder()
                    .attributeName("string")
                    .keyType("string")
                    .build())
                .projection(ProjectionArgs.builder()
                    .nonKeyAttributes("string")
                    .projectionType("string")
                    .build())
                .build())
            .pointInTimeRecoverySpecification(PointInTimeRecoverySpecificationArgs.builder()
                .pointInTimeRecoveryEnabled(false)
                .build())
            .provisionedThroughput(ProvisionedThroughputArgs.builder()
                .readCapacityUnits(0)
                .writeCapacityUnits(0)
                .build())
            .resourcePolicy(ResourcePolicyArgs.builder()
                .policyDocument("any")
                .build())
            .sseSpecification(SSESpecificationArgs.builder()
                .kmsMasterKeyId("string")
                .sseEnabled(false)
                .sseType("string")
                .build())
            .streamArn("string")
            .streamSpecification(StreamSpecificationArgs.builder()
                .resourcePolicy(ResourcePolicyArgs.builder()
                    .policyDocument("any")
                    .build())
                .streamViewType("string")
                .build())
            .tableClass("string")
            .tableName("string")
            .tags(TagArgs.builder()
                .key("string")
                .value("string")
                .build())
            .timeToLiveSpecification(TimeToLiveSpecificationArgs.builder()
                .attributeName("string")
                .enabled(false)
                .build())
            .build())
        .awsRegion("string")
        .awsSourceSchema("string")
        .awsTags(Map.of("string", "string"))
        .publicCloudConnectorsResourceId("string")
        .publicCloudResourceName("string")
        .build())
    .tags(Map.of("string", "string"))
    .build());
dynamo_db_table_resource = azure_native.awsconnector.DynamoDbTable("dynamoDbTableResource",
    resource_group_name="string",
    location="string",
    name="string",
    properties={
        "arn": "string",
        "aws_account_id": "string",
        "aws_properties": {
            "arn": "string",
            "attribute_definitions": [{
                "attribute_name": "string",
                "attribute_type": "string",
            }],
            "billing_mode": "string",
            "contributor_insights_specification": {
                "enabled": False,
            },
            "deletion_protection_enabled": False,
            "global_secondary_indexes": [{
                "contributor_insights_specification": {
                    "enabled": False,
                },
                "index_name": "string",
                "key_schema": [{
                    "attribute_name": "string",
                    "key_type": "string",
                }],
                "projection": {
                    "non_key_attributes": ["string"],
                    "projection_type": "string",
                },
                "provisioned_throughput": {
                    "read_capacity_units": 0,
                    "write_capacity_units": 0,
                },
            }],
            "import_source_specification": {
                "input_compression_type": "string",
                "input_format": "string",
                "input_format_options": {
                    "csv": {
                        "delimiter": "string",
                        "header_list": ["string"],
                    },
                },
                "s3_bucket_source": {
                    "s3_bucket": "string",
                    "s3_bucket_owner": "string",
                    "s3_key_prefix": "string",
                },
            },
            "key_schema": [{
                "attribute_name": "string",
                "key_type": "string",
            }],
            "kinesis_stream_specification": {
                "approximate_creation_date_time_precision": "string",
                "stream_arn": "string",
            },
            "local_secondary_indexes": [{
                "index_name": "string",
                "key_schema": [{
                    "attribute_name": "string",
                    "key_type": "string",
                }],
                "projection": {
                    "non_key_attributes": ["string"],
                    "projection_type": "string",
                },
            }],
            "point_in_time_recovery_specification": {
                "point_in_time_recovery_enabled": False,
            },
            "provisioned_throughput": {
                "read_capacity_units": 0,
                "write_capacity_units": 0,
            },
            "resource_policy": {
                "policy_document": "any",
            },
            "sse_specification": {
                "kms_master_key_id": "string",
                "sse_enabled": False,
                "sse_type": "string",
            },
            "stream_arn": "string",
            "stream_specification": {
                "resource_policy": {
                    "policy_document": "any",
                },
                "stream_view_type": "string",
            },
            "table_class": "string",
            "table_name": "string",
            "tags": [{
                "key": "string",
                "value": "string",
            }],
            "time_to_live_specification": {
                "attribute_name": "string",
                "enabled": False,
            },
        },
        "aws_region": "string",
        "aws_source_schema": "string",
        "aws_tags": {
            "string": "string",
        },
        "public_cloud_connectors_resource_id": "string",
        "public_cloud_resource_name": "string",
    },
    tags={
        "string": "string",
    })
const dynamoDbTableResource = new azure_native.awsconnector.DynamoDbTable("dynamoDbTableResource", {
    resourceGroupName: "string",
    location: "string",
    name: "string",
    properties: {
        arn: "string",
        awsAccountId: "string",
        awsProperties: {
            arn: "string",
            attributeDefinitions: [{
                attributeName: "string",
                attributeType: "string",
            }],
            billingMode: "string",
            contributorInsightsSpecification: {
                enabled: false,
            },
            deletionProtectionEnabled: false,
            globalSecondaryIndexes: [{
                contributorInsightsSpecification: {
                    enabled: false,
                },
                indexName: "string",
                keySchema: [{
                    attributeName: "string",
                    keyType: "string",
                }],
                projection: {
                    nonKeyAttributes: ["string"],
                    projectionType: "string",
                },
                provisionedThroughput: {
                    readCapacityUnits: 0,
                    writeCapacityUnits: 0,
                },
            }],
            importSourceSpecification: {
                inputCompressionType: "string",
                inputFormat: "string",
                inputFormatOptions: {
                    csv: {
                        delimiter: "string",
                        headerList: ["string"],
                    },
                },
                s3BucketSource: {
                    s3Bucket: "string",
                    s3BucketOwner: "string",
                    s3KeyPrefix: "string",
                },
            },
            keySchema: [{
                attributeName: "string",
                keyType: "string",
            }],
            kinesisStreamSpecification: {
                approximateCreationDateTimePrecision: "string",
                streamArn: "string",
            },
            localSecondaryIndexes: [{
                indexName: "string",
                keySchema: [{
                    attributeName: "string",
                    keyType: "string",
                }],
                projection: {
                    nonKeyAttributes: ["string"],
                    projectionType: "string",
                },
            }],
            pointInTimeRecoverySpecification: {
                pointInTimeRecoveryEnabled: false,
            },
            provisionedThroughput: {
                readCapacityUnits: 0,
                writeCapacityUnits: 0,
            },
            resourcePolicy: {
                policyDocument: "any",
            },
            sseSpecification: {
                kmsMasterKeyId: "string",
                sseEnabled: false,
                sseType: "string",
            },
            streamArn: "string",
            streamSpecification: {
                resourcePolicy: {
                    policyDocument: "any",
                },
                streamViewType: "string",
            },
            tableClass: "string",
            tableName: "string",
            tags: [{
                key: "string",
                value: "string",
            }],
            timeToLiveSpecification: {
                attributeName: "string",
                enabled: false,
            },
        },
        awsRegion: "string",
        awsSourceSchema: "string",
        awsTags: {
            string: "string",
        },
        publicCloudConnectorsResourceId: "string",
        publicCloudResourceName: "string",
    },
    tags: {
        string: "string",
    },
});
type: azure-native:awsconnector:DynamoDbTable
properties:
    location: string
    name: string
    properties:
        arn: string
        awsAccountId: string
        awsProperties:
            arn: string
            attributeDefinitions:
                - attributeName: string
                  attributeType: string
            billingMode: string
            contributorInsightsSpecification:
                enabled: false
            deletionProtectionEnabled: false
            globalSecondaryIndexes:
                - contributorInsightsSpecification:
                    enabled: false
                  indexName: string
                  keySchema:
                    - attributeName: string
                      keyType: string
                  projection:
                    nonKeyAttributes:
                        - string
                    projectionType: string
                  provisionedThroughput:
                    readCapacityUnits: 0
                    writeCapacityUnits: 0
            importSourceSpecification:
                inputCompressionType: string
                inputFormat: string
                inputFormatOptions:
                    csv:
                        delimiter: string
                        headerList:
                            - string
                s3BucketSource:
                    s3Bucket: string
                    s3BucketOwner: string
                    s3KeyPrefix: string
            keySchema:
                - attributeName: string
                  keyType: string
            kinesisStreamSpecification:
                approximateCreationDateTimePrecision: string
                streamArn: string
            localSecondaryIndexes:
                - indexName: string
                  keySchema:
                    - attributeName: string
                      keyType: string
                  projection:
                    nonKeyAttributes:
                        - string
                    projectionType: string
            pointInTimeRecoverySpecification:
                pointInTimeRecoveryEnabled: false
            provisionedThroughput:
                readCapacityUnits: 0
                writeCapacityUnits: 0
            resourcePolicy:
                policyDocument: any
            sseSpecification:
                kmsMasterKeyId: string
                sseEnabled: false
                sseType: string
            streamArn: string
            streamSpecification:
                resourcePolicy:
                    policyDocument: any
                streamViewType: string
            tableClass: string
            tableName: string
            tags:
                - key: string
                  value: string
            timeToLiveSpecification:
                attributeName: string
                enabled: false
        awsRegion: string
        awsSourceSchema: string
        awsTags:
            string: string
        publicCloudConnectorsResourceId: string
        publicCloudResourceName: string
    resourceGroupName: string
    tags:
        string: string
DynamoDbTable Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The DynamoDbTable resource accepts the following input properties:
- ResourceGroupName string
- The name of the resource group. The name is case insensitive.
- Location string
- The geo-location where the resource lives
- Name string
- Name of DynamoDBTable
- Properties
Pulumi.AzureNative.AwsConnector.Inputs.DynamoDBTableProperties
- The resource-specific properties for this resource.
- Tags Dictionary<string, string>
- Resource tags.
- ResourceGroupName string
- The name of the resource group. The name is case insensitive.
- Location string
- The geo-location where the resource lives
- Name string
- Name of DynamoDBTable
- Properties
DynamoDBTablePropertiesArgs
- The resource-specific properties for this resource.
- Tags map[string]string
- Resource tags.
- resourceGroupName String
- The name of the resource group. The name is case insensitive.
- location String
- The geo-location where the resource lives
- name String
- Name of DynamoDBTable
- properties
DynamoDBTableProperties
- The resource-specific properties for this resource.
- tags Map<String,String>
- Resource tags.
- resourceGroupName string
- The name of the resource group. The name is case insensitive.
- location string
- The geo-location where the resource lives
- name string
- Name of DynamoDBTable
- properties
DynamoDBTableProperties
- The resource-specific properties for this resource.
- tags {[key: string]: string}
- Resource tags.
- resource_group_name str
- The name of the resource group. The name is case insensitive.
- location str
- The geo-location where the resource lives
- name str
- Name of DynamoDBTable
- properties
DynamoDBTablePropertiesArgs
- The resource-specific properties for this resource.
- tags Mapping[str, str]
- Resource tags.
- resourceGroupName String
- The name of the resource group. The name is case insensitive.
- location String
- The geo-location where the resource lives
- name String
- Name of DynamoDBTable
- properties Property Map
- The resource-specific properties for this resource.
- tags Map<String>
- Resource tags.
Outputs
All input properties are implicitly available as output properties. Additionally, the DynamoDbTable resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- SystemData Pulumi.AzureNative.AwsConnector.Outputs.SystemDataResponse
- Azure Resource Manager metadata containing createdBy and modifiedBy information.
- Type string
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- Id string
- The provider-assigned unique ID for this managed resource.
- SystemData SystemDataResponse
- Azure Resource Manager metadata containing createdBy and modifiedBy information.
- Type string
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- id String
- The provider-assigned unique ID for this managed resource.
- systemData SystemDataResponse
- Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type String
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- id string
- The provider-assigned unique ID for this managed resource.
- systemData SystemDataResponse
- Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type string
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- id str
- The provider-assigned unique ID for this managed resource.
- system_data SystemDataResponse
- Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type str
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
- id String
- The provider-assigned unique ID for this managed resource.
- systemData Property Map
- Azure Resource Manager metadata containing createdBy and modifiedBy information.
- type String
- The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Supporting Types
AttributeDefinition, AttributeDefinitionArgs    
- AttributeName string
- A name for the attribute.
- AttributeType string
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
- AttributeName string
- A name for the attribute.
- AttributeType string
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
- attributeName String
- A name for the attribute.
- attributeType String
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
- attributeName string
- A name for the attribute.
- attributeType string
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
- attribute_name str
- A name for the attribute.
- attribute_type str
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
- attributeName String
- A name for the attribute.
- attributeType String
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
AttributeDefinitionResponse, AttributeDefinitionResponseArgs      
- AttributeName string
- A name for the attribute.
- AttributeType string
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
- AttributeName string
- A name for the attribute.
- AttributeType string
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
- attributeName String
- A name for the attribute.
- attributeType String
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
- attributeName string
- A name for the attribute.
- attributeType string
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
- attribute_name str
- A name for the attribute.
- attribute_type str
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
- attributeName String
- A name for the attribute.
- attributeType String
- The data type for the attribute, where: + S- the attribute is of type String +N- the attribute is of type Number +B- the attribute is of type Binary
AwsDynamoDBTableProperties, AwsDynamoDBTablePropertiesArgs        
- Arn string
- Property arn
- AttributeDefinitions List&lt;Pulumi.AzureNative.AwsConnector.Inputs.AttributeDefinition&gt;
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- BillingMode string
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- ContributorInsightsSpecification Pulumi.AzureNative.AwsConnector.Inputs.ContributorInsightsSpecification
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- DeletionProtectionEnabled bool
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- GlobalSecondaryIndexes List&lt;Pulumi.AzureNative.AwsConnector.Inputs.GlobalSecondaryIndex&gt;
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- ImportSourceSpecification Pulumi.AzureNative.AwsConnector.Inputs.ImportSourceSpecification
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- KeySchema List&lt;Pulumi.AzureNative.AwsConnector.Inputs.KeySchema&gt;
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- KinesisStreamSpecification Pulumi.AzureNative.AwsConnector.Inputs.KinesisStreamSpecification
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- LocalSecondaryIndexes List&lt;Pulumi.AzureNative.AwsConnector.Inputs.LocalSecondaryIndex&gt;
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- PointInTimeRecoverySpecification Pulumi.AzureNative.AwsConnector.Inputs.PointInTimeRecoverySpecification
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- ProvisionedThroughput Pulumi.AzureNative.AwsConnector.Inputs.ProvisionedThroughput
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- ResourcePolicy Pulumi.AzureNative.AwsConnector.Inputs.ResourcePolicy
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- SseSpecification Pulumi.AzureNative.AwsConnector.Inputs.SSESpecification
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- StreamArn string
- Property streamArn
- StreamSpecification Pulumi.AzureNative.AwsConnector.Inputs.StreamSpecification
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- TableClass string
- The table class of the new table. Valid values are STANDARDandSTANDARD_INFREQUENT_ACCESS.
- TableName string
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- Tags List&lt;Pulumi.AzureNative.AwsConnector.Inputs.Tag&gt;
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- TimeToLiveSpecification Pulumi.AzureNative.AwsConnector.Inputs.TimeToLiveSpecification
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- Arn string
- Property arn
- AttributeDefinitions []AttributeDefinition 
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- BillingMode string
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- ContributorInsightsSpecification ContributorInsightsSpecification
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- DeletionProtectionEnabled bool
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- GlobalSecondaryIndexes []GlobalSecondaryIndex
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- ImportSourceSpecification ImportSourceSpecification
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- KeySchema []KeySchema 
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- KinesisStreamSpecification KinesisStreamSpecification
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- LocalSecondaryIndexes []LocalSecondaryIndex
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- PointInTimeRecoverySpecification PointInTimeRecoverySpecification
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- ProvisionedThroughput ProvisionedThroughput 
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- ResourcePolicy ResourcePolicy 
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- SseSpecification SSESpecification
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- StreamArn string
- Property streamArn
- StreamSpecification StreamSpecification 
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- TableClass string
- The table class of the new table. Valid values are STANDARDandSTANDARD_INFREQUENT_ACCESS.
- TableName string
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- Tags []Tag
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- TimeToLiveSpecification TimeToLiveSpecification
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn String
- Property arn
- attributeDefinitions List<AttributeDefinition> 
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billingMode String
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- contributorInsightsSpecification ContributorInsightsSpecification
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletionProtectionEnabled Boolean
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- globalSecondaryIndexes List&lt;GlobalSecondaryIndex&gt;
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- importSourceSpecification ImportSourceSpecification
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- keySchema List<KeySchema> 
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- kinesisStreamSpecification KinesisStreamSpecification
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- localSecondaryIndexes List&lt;LocalSecondaryIndex&gt;
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- pointInTimeRecoverySpecification PointInTimeRecoverySpecification
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisionedThroughput ProvisionedThroughput 
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- resourcePolicy ResourcePolicy 
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sseSpecification SSESpecification
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- streamArn String
- Property streamArn
- streamSpecification StreamSpecification 
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- tableClass String
- The table class of the new table. Valid values are STANDARDandSTANDARD_INFREQUENT_ACCESS.
- tableName String
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- tags List&lt;Tag&gt;
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- timeToLiveSpecification TimeToLiveSpecification
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn string
- Property arn
- attributeDefinitions AttributeDefinition[] 
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billingMode string
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- contributorInsightsSpecification ContributorInsightsSpecification
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletionProtectionEnabled boolean
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- globalSecondaryIndexes GlobalSecondaryIndex[]
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- importSourceSpecification ImportSourceSpecification
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- keySchema KeySchema[] 
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- kinesisStreamSpecification KinesisStreamSpecification
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- localSecondaryIndexes LocalSecondaryIndex[]
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- pointInTimeRecoverySpecification PointInTimeRecoverySpecification
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisionedThroughput ProvisionedThroughput 
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- resourcePolicy ResourcePolicy 
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sseSpecification SSESpecification
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- streamArn string
- Property streamArn
- streamSpecification StreamSpecification 
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- tableClass string
- The table class of the new table. Valid values are STANDARDandSTANDARD_INFREQUENT_ACCESS.
- tableName string
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- tags Tag[]
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- timeToLiveSpecification TimeToLiveSpecification
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn str
- Property arn
- attribute_definitions Sequence[AttributeDefinition] 
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billing_mode str
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- contributor_insights_specification ContributorInsightsSpecification
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletion_protection_enabled bool
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- global_secondary_indexes Sequence[GlobalSecondaryIndex]
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- import_source_specification ImportSourceSpecification
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- key_schema Sequence[KeySchema] 
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- kinesis_stream_specification KinesisStreamSpecification
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- local_secondary_indexes Sequence[LocalSecondaryIndex]
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- point_in_time_recovery_specification PointInTimeRecoverySpecification
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisioned_throughput ProvisionedThroughput 
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- resource_policy ResourcePolicy 
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sse_specification SSESpecification
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- stream_arn str
- Property streamArn
- stream_specification StreamSpecification 
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- table_class str
- The table class of the new table. Valid values are STANDARDandSTANDARD_INFREQUENT_ACCESS.
- table_name str
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- tags Sequence[Tag]
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- time_to_live_specification TimeToLiveSpecification
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn String
- Property arn
- attributeDefinitions List<Property Map>
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billingMode String
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- contributorInsightsSpecification Property Map
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletionProtectionEnabled Boolean
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- globalSecondaryIndexes List<Property Map>
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- importSourceSpecification Property Map
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- keySchema List<Property Map>
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- kinesisStreamSpecification Property Map
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- localSecondaryIndexes List<Property Map>
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- pointInTimeRecoverySpecification Property Map
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisionedThroughput Property Map
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- resourcePolicy Property Map
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sseSpecification Property Map
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- streamArn String
- Property streamArn
- streamSpecification Property Map
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- tableClass String
- The table class of the new table. Valid values are STANDARDandSTANDARD_INFREQUENT_ACCESS.
- tableName String
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- tags List<Property Map>
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- timeToLiveSpecification Property Map
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
AwsDynamoDBTablePropertiesResponse, AwsDynamoDBTablePropertiesResponseArgs          
- Arn string
- Property arn
- AttributeDefinitions List<Pulumi.AzureNative.AwsConnector.Inputs.AttributeDefinitionResponse>
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- BillingMode string
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- ContributorInsightsSpecification Pulumi.AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationResponse
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- DeletionProtectionEnabled bool
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- GlobalSecondaryIndexes List<Pulumi.AzureNative.AwsConnector.Inputs.GlobalSecondaryIndexResponse>
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- ImportSourceSpecification Pulumi.AzureNative.AwsConnector.Inputs.ImportSourceSpecificationResponse
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- KeySchema List<Pulumi.AzureNative.AwsConnector.Inputs.KeySchemaResponse>
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- KinesisStreamSpecification Pulumi.AzureNative.AwsConnector.Inputs.KinesisStreamSpecificationResponse
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- LocalSecondaryIndexes List<Pulumi.AzureNative.AwsConnector.Inputs.LocalSecondaryIndexResponse>
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- PointInTimeRecoverySpecification Pulumi.AzureNative.AwsConnector.Inputs.PointInTimeRecoverySpecificationResponse
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- ProvisionedThroughput Pulumi.AzureNative.AwsConnector.Inputs.ProvisionedThroughputResponse
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- ResourcePolicy Pulumi.AzureNative.AwsConnector.Inputs.ResourcePolicyResponse
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- SseSpecification Pulumi.AzureNative.AwsConnector.Inputs.SSESpecificationResponse
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- StreamArn string
- Property streamArn
- StreamSpecification Pulumi.AzureNative.AwsConnector.Inputs.StreamSpecificationResponse
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- TableClass string
- The table class of the new table. Valid values are STANDARDandSTANDARD_INFREQUENT_ACCESS.
- TableName string
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- Tags List<Pulumi.AzureNative.AwsConnector.Inputs.TagResponse>
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- TimeToLiveSpecification Pulumi.AzureNative.AwsConnector.Inputs.TimeToLiveSpecificationResponse
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- Arn string
- Property arn
- AttributeDefinitions []AttributeDefinitionResponse
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- BillingMode string
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- ContributorInsightsSpecification ContributorInsightsSpecificationResponse
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- DeletionProtectionEnabled bool
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- GlobalSecondaryIndexes []GlobalSecondaryIndexResponse
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- ImportSourceSpecification ImportSourceSpecificationResponse
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- KeySchema []KeySchemaResponse
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- KinesisStreamSpecification KinesisStreamSpecificationResponse
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- LocalSecondaryIndexes []LocalSecondaryIndexResponse
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- PointInTimeRecoverySpecification PointInTimeRecoverySpecificationResponse
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- ProvisionedThroughput ProvisionedThroughputResponse
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- ResourcePolicy ResourcePolicyResponse
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- SseSpecification SSESpecificationResponse 
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- StreamArn string
- Property streamArn
- StreamSpecification StreamSpecificationResponse
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- TableClass string
- The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- TableName string
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- Tags []TagResponse
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- TimeToLiveSpecification TimeToLiveSpecificationResponse
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn String
- Property arn
- attributeDefinitions List<AttributeDefinitionResponse>
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billingMode String
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- contributorInsightsSpecification ContributorInsightsSpecificationResponse
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletionProtectionEnabled Boolean
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- globalSecondaryIndexes List<GlobalSecondaryIndexResponse>
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- importSourceSpecification ImportSourceSpecificationResponse
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- keySchema List<KeySchemaResponse>
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- kinesisStreamSpecification KinesisStreamSpecificationResponse
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- localSecondaryIndexes List<LocalSecondaryIndexResponse>
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- pointInTimeRecoverySpecification PointInTimeRecoverySpecificationResponse
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisionedThroughput ProvisionedThroughputResponse
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- resourcePolicy ResourcePolicyResponse
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sseSpecification SSESpecificationResponse 
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- streamArn String
- Property streamArn
- streamSpecification StreamSpecificationResponse
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- tableClass String
- The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- tableName String
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- tags List<TagResponse>
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- timeToLiveSpecification TimeToLiveSpecificationResponse
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn string
- Property arn
- attributeDefinitions AttributeDefinitionResponse[]
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billingMode string
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- contributorInsightsSpecification ContributorInsightsSpecificationResponse
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletionProtectionEnabled boolean
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- globalSecondaryIndexes GlobalSecondaryIndexResponse[]
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- importSourceSpecification ImportSourceSpecificationResponse
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- keySchema KeySchemaResponse[]
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- kinesisStreamSpecification KinesisStreamSpecificationResponse
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- localSecondaryIndexes LocalSecondaryIndexResponse[]
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- pointInTimeRecoverySpecification PointInTimeRecoverySpecificationResponse
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisionedThroughput ProvisionedThroughputResponse
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- resourcePolicy ResourcePolicyResponse
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sseSpecification SSESpecificationResponse 
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- streamArn string
- Property streamArn
- streamSpecification StreamSpecificationResponse
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- tableClass string
- The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- tableName string
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- tags TagResponse[]
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- timeToLiveSpecification TimeToLiveSpecificationResponse
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn str
- Property arn
- attribute_definitions Sequence[AttributeDefinitionResponse]
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billing_mode str
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- contributor_insights_specification ContributorInsightsSpecificationResponse
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletion_protection_enabled bool
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- global_secondary_indexes Sequence[GlobalSecondaryIndexResponse]
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- import_source_specification ImportSourceSpecificationResponse
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- key_schema Sequence[KeySchemaResponse]
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- kinesis_stream_specification KinesisStreamSpecificationResponse
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- local_secondary_indexes Sequence[LocalSecondaryIndexResponse]
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- point_in_time_recovery_specification PointInTimeRecoverySpecificationResponse
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisioned_throughput ProvisionedThroughputResponse
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- resource_policy ResourcePolicyResponse
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sse_specification SSESpecificationResponse 
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- stream_arn str
- Property streamArn
- stream_specification StreamSpecificationResponse
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- table_class str
- The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- table_name str
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- tags Sequence[TagResponse]
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- time_to_live_specification TimeToLiveSpecificationResponse
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
- arn String
- Property arn
- attributeDefinitions List<Property Map>
- A list of attributes that describe the key schema for the table and indexes. This property is required to create a DDB table. Update requires: Some interruptions. Replacement if you edit an existing AttributeDefinition.
- billingMode String
- Specify how you are charged for read and write throughput and how you manage capacity. Valid values include: + PROVISIONED- We recommend usingPROVISIONEDfor predictable workloads.PROVISIONEDsets the billing mode to Provisioned Mode. +PAY_PER_REQUEST- We recommend usingPAY_PER_REQUESTfor unpredictable workloads.PAY_PER_REQUESTsets the billing mode to On-Demand Mode. If not specified, the default isPROVISIONED.
- contributorInsightsSpecification Property Map
- The settings used to enable or disable CloudWatch Contributor Insights for the specified table. The settings used to enable or disable CloudWatch Contributor Insights.
- deletionProtectionEnabled Boolean
- Determines if a table is protected from deletion. When enabled, the table cannot be deleted by any user or process. This setting is disabled by default. For more information, see Using deletion protection in the Developer Guide.
- globalSecondaryIndexes List&lt;Property Map&gt;
- Global secondary indexes to be created on the table. You can create up to 20 global secondary indexes. If you update a table to include a new global secondary index, CFNlong initiates the index creation and then proceeds with the stack update. CFNlong doesn't wait for the index to complete creation because the backfilling phase can take a long time, depending on the size of the table. You can't use the index or update the table until the index's status is ACTIVE. You can track its status by using the DynamoDB DescribeTable command. If you add or delete an index during an update, we recommend that you don't update any other resources. If your stack fails to update and is rolled back while adding a new index, you must manually delete the index. Updates are not supported. The following are exceptions: + If you update either the contributor insights specification or the provisioned throughput values of global secondary indexes, you can update the table without interruption. + You can delete or add one global secondary index without interruption. If you do both in the same update (for example, by changing the index's logical ID), the update fails.
- importSourceSpecification Property Map
- Specifies the properties of data being imported from the S3 bucket source to the table. If you specify the ImportSourceSpecificationproperty, and also specify either theStreamSpecification, theTableClassproperty, or theDeletionProtectionEnabledproperty, the IAM entity creating/updating stack must haveUpdateTablepermission. Specifies the properties of data being imported from the S3 bucket source to the table.
- keySchema List<Property Map>
- Specifies the attributes that make up the primary key for the table. The attributes in the KeySchemaproperty must also be defined in theAttributeDefinitionsproperty.
- kinesisStreamSpecification Property Map
- The Kinesis Data Streams configuration for the specified table. The Kinesis Data Streams configuration for the specified table.
- localSecondaryIndexes List&lt;Property Map&gt;
- Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.
- pointInTimeRecoverySpecification Property Map
- The settings used to enable point in time recovery. The settings used to enable point in time recovery.
- provisionedThroughput Property Map
- Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Amazon DynamoDB Table ProvisionedThroughput. If you setBillingModeasPROVISIONED, you must specify this property. If you setBillingModeasPAY_PER_REQUEST, you cannot specify this property. Throughput for the specified table, which consists of values forReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- resourcePolicy Property Map
- A resource-based policy document that contains permissions to add to the specified table. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. When you attach a resource-based policy while creating a table, the policy creation is strongly consistent. For information about the considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- sseSpecification Property Map
- Specifies the settings to enable server-side encryption. Represents the settings used to enable server-side encryption.
- streamArn String
- Property streamArn
- streamSpecification Property Map
- The settings for the DDB table stream, which capture changes to items stored in the table. Represents the DynamoDB Streams configuration for a table in DynamoDB.
- tableClass String
- The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- tableName String
- A name for the table. If you don't specify a name, CFNlong generates a unique physical ID and uses that ID for the table name. For more information, see Name Type. If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.
- tags List&lt;Property Map&gt;
- An array of key-value pairs to apply to this resource. For more information, see Tag.
- timeToLiveSpecification Property Map
- Specifies the Time to Live (TTL) settings for the table. For detailed information about the limits in DynamoDB, see Limits in Amazon DynamoDB in the Amazon DynamoDB Developer Guide. Represents the settings used to enable or disable Time to Live (TTL) for the specified table.
ContributorInsightsSpecification, ContributorInsightsSpecificationArgs      
- Enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- Enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled Boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled Boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
ContributorInsightsSpecificationResponse, ContributorInsightsSpecificationResponseArgs        
- Enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- Enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled Boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled bool
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
- enabled Boolean
- Indicates whether CloudWatch Contributor Insights are to be enabled (true) or disabled (false).
Csv, CsvArgs  
- Delimiter string
- The delimiter used for separating items in the CSV file being imported.
- HeaderList List<string>
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- Delimiter string
- The delimiter used for separating items in the CSV file being imported.
- HeaderList []string
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter String
- The delimiter used for separating items in the CSV file being imported.
- headerList List<String>
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter string
- The delimiter used for separating items in the CSV file being imported.
- headerList string[]
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter str
- The delimiter used for separating items in the CSV file being imported.
- header_list Sequence[str]
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter String
- The delimiter used for separating items in the CSV file being imported.
- headerList List<String>
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
CsvResponse, CsvResponseArgs    
- Delimiter string
- The delimiter used for separating items in the CSV file being imported.
- HeaderList List<string>
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- Delimiter string
- The delimiter used for separating items in the CSV file being imported.
- HeaderList []string
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter String
- The delimiter used for separating items in the CSV file being imported.
- headerList List<String>
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter string
- The delimiter used for separating items in the CSV file being imported.
- headerList string[]
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter str
- The delimiter used for separating items in the CSV file being imported.
- header_list Sequence[str]
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
- delimiter String
- The delimiter used for separating items in the CSV file being imported.
- headerList List<String>
- List of the headers used to specify a common header for all source CSV files being imported. If this field is specified then the first line of each CSV file is treated as data instead of the header. If this field is not specified then the first line of each CSV file is treated as the header.
DynamoDBTableProperties, DynamoDBTablePropertiesArgs      
- Arn string
- Amazon Resource Name (ARN)
- AwsAccountId string
- AWS Account ID
- AwsProperties Pulumi.AzureNative.AwsConnector.Inputs.AwsDynamoDBTableProperties
- AWS Properties
- AwsRegion string
- AWS Region
- AwsSourceSchema string
- AWS Source Schema
- Tags Dictionary&lt;string, string&gt;
- AWS Tags
- PublicCloudConnectorsResourceId string
- Public Cloud Connectors Resource ID
- PublicCloudResourceName string
- Public Cloud Resource Name
- Arn string
- Amazon Resource Name (ARN)
- AwsAccountId string
- AWS Account ID
- AwsProperties AwsDynamoDBTableProperties
- AWS Properties
- AwsRegion string
- AWS Region
- AwsSourceSchema string
- AWS Source Schema
- Tags map[string]string
- AWS Tags
- PublicCloudConnectorsResourceId string
- Public Cloud Connectors Resource ID
- PublicCloudResourceName string
- Public Cloud Resource Name
- arn String
- Amazon Resource Name (ARN)
- awsAccountId String
- AWS Account ID
- awsProperties AwsDynamoDBTableProperties
- AWS Properties
- awsRegion String
- AWS Region
- awsSourceSchema String
- AWS Source Schema
- tags Map&lt;String,String&gt;
- AWS Tags
- publicCloudConnectorsResourceId String
- Public Cloud Connectors Resource ID
- publicCloudResourceName String
- Public Cloud Resource Name
- arn string
- Amazon Resource Name (ARN)
- awsAccountId string
- AWS Account ID
- awsProperties AwsDynamoDBTableProperties
- AWS Properties
- awsRegion string
- AWS Region
- awsSourceSchema string
- AWS Source Schema
- tags {[key: string]: string}
- AWS Tags
- publicCloudConnectorsResourceId string
- Public Cloud Connectors Resource ID
- publicCloudResourceName string
- Public Cloud Resource Name
- arn str
- Amazon Resource Name (ARN)
- aws_account_id str
- AWS Account ID
- aws_properties AwsDynamoDBTableProperties
- AWS Properties
- aws_region str
- AWS Region
- aws_source_schema str
- AWS Source Schema
- tags Mapping[str, str]
- AWS Tags
- public_cloud_connectors_resource_id str
- Public Cloud Connectors Resource ID
- public_cloud_resource_name str
- Public Cloud Resource Name
- arn String
- Amazon Resource Name (ARN)
- awsAccountId String
- AWS Account ID
- awsProperties Property Map
- AWS Properties
- awsRegion String
- AWS Region
- awsSourceSchema String
- AWS Source Schema
- tags Map&lt;String&gt;
- AWS Tags
- publicCloudConnectorsResourceId String
- Public Cloud Connectors Resource ID
- publicCloudResourceName String
- Public Cloud Resource Name
DynamoDBTablePropertiesResponse, DynamoDBTablePropertiesResponseArgs        
- ProvisioningState string
- The status of the last operation.
- Arn string
- Amazon Resource Name (ARN)
- AwsAccountId string
- AWS Account ID
- AwsProperties Pulumi.AzureNative.AwsConnector.Inputs.AwsDynamoDBTablePropertiesResponse
- AWS Properties
- AwsRegion string
- AWS Region
- AwsSourceSchema string
- AWS Source Schema
- Tags Dictionary&lt;string, string&gt;
- AWS Tags
- PublicCloudConnectorsResourceId string
- Public Cloud Connectors Resource ID
- PublicCloudResourceName string
- Public Cloud Resource Name
- ProvisioningState string
- The status of the last operation.
- Arn string
- Amazon Resource Name (ARN)
- AwsAccountId string
- AWS Account ID
- AwsProperties AwsDynamoDBTablePropertiesResponse
- AWS Properties
- AwsRegion string
- AWS Region
- AwsSourceSchema string
- AWS Source Schema
- Tags map[string]string
- AWS Tags
- PublicCloudConnectorsResourceId string
- Public Cloud Connectors Resource ID
- PublicCloudResourceName string
- Public Cloud Resource Name
- provisioningState String
- The status of the last operation.
- arn String
- Amazon Resource Name (ARN)
- awsAccountId String
- AWS Account ID
- awsProperties AwsDynamoDBTablePropertiesResponse
- AWS Properties
- awsRegion String
- AWS Region
- awsSourceSchema String
- AWS Source Schema
- tags Map&lt;String,String&gt;
- AWS Tags
- publicCloudConnectorsResourceId String
- Public Cloud Connectors Resource ID
- publicCloudResourceName String
- Public Cloud Resource Name
- provisioningState string
- The status of the last operation.
- arn string
- Amazon Resource Name (ARN)
- awsAccountId string
- AWS Account ID
- awsProperties AwsDynamoDBTablePropertiesResponse
- AWS Properties
- awsRegion string
- AWS Region
- awsSourceSchema string
- AWS Source Schema
- tags {[key: string]: string}
- AWS Tags
- publicCloudConnectorsResourceId string
- Public Cloud Connectors Resource ID
- publicCloudResourceName string
- Public Cloud Resource Name
- provisioning_state str
- The status of the last operation.
- arn str
- Amazon Resource Name (ARN)
- aws_account_id str
- AWS Account ID
- aws_properties AwsDynamoDBTablePropertiesResponse
- AWS Properties
- aws_region str
- AWS Region
- aws_source_schema str
- AWS Source Schema
- tags Mapping[str, str]
- AWS Tags
- public_cloud_connectors_resource_id str
- Public Cloud Connectors Resource ID
- public_cloud_resource_name str
- Public Cloud Resource Name
- provisioningState String
- The status of the last operation.
- arn String
- Amazon Resource Name (ARN)
- awsAccountId String
- AWS Account ID
- awsProperties Property Map
- AWS Properties
- awsRegion String
- AWS Region
- awsSourceSchema String
- AWS Source Schema
- tags Map&lt;String&gt;
- AWS Tags
- publicCloudConnectorsResourceId String
- Public Cloud Connectors Resource ID
- publicCloudResourceName String
- Public Cloud Resource Name
GlobalSecondaryIndex, GlobalSecondaryIndexArgs      
- ContributorInsightsSpecification Pulumi.AzureNative.AwsConnector.Inputs.ContributorInsightsSpecification
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- IndexName string
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- KeySchema List&lt;Pulumi.AzureNative.AwsConnector.Inputs.KeySchema&gt;
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Projection Pulumi.AzureNative.AwsConnector.Inputs.Projection
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- ProvisionedThroughput Pulumi.AzureNative.AwsConnector.Inputs.ProvisionedThroughput
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- ContributorInsightsSpecification ContributorInsightsSpecification
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- IndexName string
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- KeySchema []KeySchema 
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Projection Projection
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- ProvisionedThroughput ProvisionedThroughput 
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributorInsightsSpecification ContributorInsightsSpecification
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- indexName String
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- keySchema List<KeySchema> 
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Projection
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisionedThroughput ProvisionedThroughput 
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributorInsightsSpecification ContributorInsightsSpecification
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- indexName string
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- keySchema KeySchema[] 
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Projection
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisionedThroughput ProvisionedThroughput 
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributor_insights_specification ContributorInsightsSpecification
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- index_name str
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- key_schema Sequence[KeySchema] 
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Projection
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisioned_throughput ProvisionedThroughput 
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributorInsightsSpecification Property Map
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- indexName String
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- keySchema List<Property Map>
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Property Map
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisionedThroughput Property Map
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnitsandWriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
GlobalSecondaryIndexResponse, GlobalSecondaryIndexResponseArgs        
- ContributorInsightsSpecification Pulumi.AzureNative.AwsConnector.Inputs.ContributorInsightsSpecificationResponse
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- IndexName string
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- KeySchema List<Pulumi.AzureNative.AwsConnector.Inputs.KeySchemaResponse>
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Projection Pulumi.AzureNative.AwsConnector.Inputs.ProjectionResponse
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- ProvisionedThroughput Pulumi.AzureNative.AwsConnector.Inputs.ProvisionedThroughputResponse
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- ContributorInsightsSpecification ContributorInsightsSpecificationResponse
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- IndexName string
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- KeySchema []KeySchemaResponse
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Projection ProjectionResponse
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- ProvisionedThroughput ProvisionedThroughputResponse
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributorInsightsSpecification ContributorInsightsSpecificationResponse
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- indexName String
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- keySchema List<KeySchemaResponse>
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection ProjectionResponse
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisionedThroughput ProvisionedThroughputResponse
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributorInsightsSpecification ContributorInsightsSpecificationResponse
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- indexName string
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- keySchema KeySchemaResponse[]
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection ProjectionResponse
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisionedThroughput ProvisionedThroughputResponse
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributor_insights_specification ContributorInsightsSpecificationResponse
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- index_name str
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- key_schema Sequence[KeySchemaResponse]
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection ProjectionResponse
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisioned_throughput ProvisionedThroughputResponse
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
- contributorInsightsSpecification Property Map
- The settings used to enable or disable CloudWatch Contributor Insights for the specified global secondary index. The settings used to enable or disable CloudWatch Contributor Insights.
- indexName String
- The name of the global secondary index. The name must be unique among all other indexes on this table.
- keySchema List<Property Map>
- The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Property Map
- Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- provisionedThroughput Property Map
- Represents the provisioned throughput settings for the specified global secondary index. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. Throughput for the specified table, which consists of values for ReadCapacityUnits and WriteCapacityUnits. For more information about the contents of a provisioned throughput structure, see Table ProvisionedThroughput.
ImportSourceSpecification, ImportSourceSpecificationArgs      
- InputCompressionType string
- Type of compression to be used on the input coming from the imported table.
- InputFormat string
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- InputFormatOptions Pulumi.AzureNative.AwsConnector.Inputs.InputFormatOptions
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- S3BucketSource Pulumi.AzureNative.AwsConnector.Inputs.S3BucketSource
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- InputCompressionType string
- Type of compression to be used on the input coming from the imported table.
- InputFormat string
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- InputFormatOptions InputFormatOptions
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- S3BucketSource S3BucketSource
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- inputCompressionType String
- Type of compression to be used on the input coming from the imported table.
- inputFormat String
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- inputFormatOptions InputFormatOptions
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3BucketSource S3BucketSource
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- inputCompressionType string
- Type of compression to be used on the input coming from the imported table.
- inputFormat string
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- inputFormatOptions InputFormatOptions
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3BucketSource S3BucketSource
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- input_compression_type str
- Type of compression to be used on the input coming from the imported table.
- input_format str
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- input_format_options InputFormatOptions
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3_bucket_source S3BucketSource
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- inputCompressionType String
- Type of compression to be used on the input coming from the imported table.
- inputFormat String
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- inputFormatOptions Property Map
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3BucketSource Property Map
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
ImportSourceSpecificationResponse, ImportSourceSpecificationResponseArgs        
- InputCompressionType string
- Type of compression to be used on the input coming from the imported table.
- InputFormat string
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- InputFormatOptions Pulumi.AzureNative.AwsConnector.Inputs.InputFormatOptionsResponse
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- S3BucketSource Pulumi.AzureNative.AwsConnector.Inputs.S3BucketSourceResponse
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- InputCompressionType string
- Type of compression to be used on the input coming from the imported table.
- InputFormat string
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- InputFormatOptions InputFormatOptionsResponse
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- S3BucketSource S3BucketSourceResponse
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- inputCompressionType String
- Type of compression to be used on the input coming from the imported table.
- inputFormat String
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- inputFormatOptions InputFormatOptionsResponse
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3BucketSource S3BucketSourceResponse
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- inputCompressionType string
- Type of compression to be used on the input coming from the imported table.
- inputFormat string
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- inputFormatOptions InputFormatOptionsResponse
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3BucketSource S3BucketSourceResponse
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- input_compression_type str
- Type of compression to be used on the input coming from the imported table.
- input_format str
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- input_format_options InputFormatOptionsResponse
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3_bucket_source S3BucketSourceResponse
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
- inputCompressionType String
- Type of compression to be used on the input coming from the imported table.
- inputFormat String
- The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON or ION.
- inputFormatOptions Property Map
- Additional properties that specify how the input is formatted. The format options for the data that was imported into the target table. There is one value, CsvOption.
- s3BucketSource Property Map
- The S3 bucket that provides the source for the import. The S3 bucket that is being imported from.
InputFormatOptions, InputFormatOptionsArgs      
- Csv Pulumi.AzureNative.AwsConnector.Inputs.Csv
- The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- csv Property Map
- The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
InputFormatOptionsResponse, InputFormatOptionsResponseArgs        
- Csv Pulumi.AzureNative.AwsConnector.Inputs.CsvResponse
- The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- Csv CsvResponse
- The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- csv CsvResponse
- The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- csv CsvResponse
- The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- csv CsvResponse
- The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
- csv Property Map
- The options for imported source files in CSV format. The values are Delimiter and HeaderList. The options for imported source files in CSV format. The values are Delimiter and HeaderList.
KeySchema, KeySchemaArgs    
- AttributeName string
- The name of a key attribute.
- KeyType string
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- AttributeName string
- The name of a key attribute.
- KeyType string
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attributeName String
- The name of a key attribute.
- keyType String
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attributeName string
- The name of a key attribute.
- keyType string
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attribute_name str
- The name of a key attribute.
- key_type str
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attributeName String
- The name of a key attribute.
- keyType String
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
KeySchemaResponse, KeySchemaResponseArgs      
- AttributeName string
- The name of a key attribute.
- KeyType string
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- AttributeName string
- The name of a key attribute.
- KeyType string
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attributeName String
- The name of a key attribute.
- keyType String
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attributeName string
- The name of a key attribute.
- keyType string
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attribute_name str
- The name of a key attribute.
- key_type str
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- attributeName String
- The name of a key attribute.
- keyType String
- The role that this key attribute will assume: + HASH - partition key + RANGE - sort key. The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
KinesisStreamSpecification, KinesisStreamSpecificationArgs      
- ApproximateCreationDateTimePrecision string | Pulumi.AzureNative.AwsConnector.KinesisStreamSpecificationApproximateCreationDateTimePrecision
- The precision for the time and date that the stream was created.
- StreamArn string
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- ApproximateCreationDateTimePrecision string | KinesisStreamSpecificationApproximateCreationDateTimePrecision
- The precision for the time and date that the stream was created.
- StreamArn string
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximateCreationDateTimePrecision String | KinesisStreamSpecificationApproximateCreationDateTimePrecision
- The precision for the time and date that the stream was created.
- streamArn String
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximateCreationDateTimePrecision string | KinesisStreamSpecificationApproximateCreationDateTimePrecision
- The precision for the time and date that the stream was created.
- streamArn string
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximate_creation_date_time_precision str | KinesisStreamSpecificationApproximateCreationDateTimePrecision
- The precision for the time and date that the stream was created.
- stream_arn str
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximateCreationDateTimePrecision String | "MICROSECOND" | "MILLISECOND"
- The precision for the time and date that the stream was created.
- streamArn String
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
KinesisStreamSpecificationApproximateCreationDateTimePrecision, KinesisStreamSpecificationApproximateCreationDateTimePrecisionArgs                
- MICROSECOND
- MICROSECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- MILLISECOND
- MILLISECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
- KinesisStreamSpecificationApproximateCreationDateTimePrecisionMICROSECOND
- MICROSECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- KinesisStreamSpecificationApproximateCreationDateTimePrecisionMILLISECOND
- MILLISECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
- MICROSECOND
- MICROSECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- MILLISECOND
- MILLISECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
- MICROSECOND
- MICROSECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- MILLISECOND
- MILLISECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
- MICROSECOND
- MICROSECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- MILLISECOND
- MILLISECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
- "MICROSECOND"
- MICROSECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MICROSECOND
- "MILLISECOND"
- MILLISECOND — KinesisStreamSpecificationApproximateCreationDateTimePrecision enum MILLISECOND
KinesisStreamSpecificationResponse, KinesisStreamSpecificationResponseArgs        
- ApproximateCreationDateTimePrecision string
- The precision for the time and date that the stream was created.
- StreamArn string
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- ApproximateCreationDateTimePrecision string
- The precision for the time and date that the stream was created.
- StreamArn string
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximateCreationDateTimePrecision String
- The precision for the time and date that the stream was created.
- streamArn String
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximateCreationDateTimePrecision string
- The precision for the time and date that the stream was created.
- streamArn string
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximate_creation_date_time_precision str
- The precision for the time and date that the stream was created.
- stream_arn str
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
- approximateCreationDateTimePrecision String
- The precision for the time and date that the stream was created.
- streamArn String
- The ARN for a specific Kinesis data stream. Length Constraints: Minimum length of 37. Maximum length of 1024.
LocalSecondaryIndex, LocalSecondaryIndexArgs      
- IndexName string
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- KeySchema List<Pulumi.AzureNative.AwsConnector.Inputs.KeySchema>
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Projection Pulumi.AzureNative.AwsConnector.Inputs.Projection
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- IndexName string
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- KeySchema []KeySchema 
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Projection Projection
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- indexName String
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- keySchema List<KeySchema> 
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Projection
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- indexName string
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- keySchema KeySchema[] 
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Projection
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- index_name str
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- key_schema Sequence[KeySchema] 
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Projection
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- indexName String
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- keySchema List<Property Map>
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Property Map
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
LocalSecondaryIndexResponse, LocalSecondaryIndexResponseArgs        
- IndexName string
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- KeySchema List<Pulumi.AzureNative.AwsConnector.Inputs.KeySchemaResponse>
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Projection Pulumi.AzureNative.AwsConnector.Inputs.ProjectionResponse
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- IndexName string
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- KeySchema []KeySchemaResponse
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- Projection ProjectionResponse
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- indexName String
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- keySchema List<KeySchemaResponse>
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection ProjectionResponse
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- indexName string
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- keySchema KeySchemaResponse[]
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection ProjectionResponse
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- index_name str
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- key_schema Sequence[KeySchemaResponse]
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection ProjectionResponse
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
- indexName String
- The name of the local secondary index. The name must be unique among all other indexes on this table.
- keySchema List<Property Map>
- The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types: + HASH- partition key +RANGE- sort key The partition key of an item is also known as its hash attribute. The term 'hash attribute' derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. The sort key of an item is also known as its range attribute. The term 'range attribute' derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
- projection Property Map
- Represents attributes that are copied (projected) from the table into the local secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
PointInTimeRecoverySpecification, PointInTimeRecoverySpecificationArgs          
- PointInTimeRecoveryEnabled bool
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- PointInTimeRecoveryEnabled bool
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled Boolean
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled boolean
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- point_in_time_recovery_enabled bool
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled Boolean
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
PointInTimeRecoverySpecificationResponse, PointInTimeRecoverySpecificationResponseArgs            
- PointInTimeRecoveryEnabled bool
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- PointInTimeRecoveryEnabled bool
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled Boolean
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled boolean
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- point_in_time_recovery_enabled bool
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
- pointInTimeRecoveryEnabled Boolean
- Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
Projection, ProjectionArgs  
- NonKeyAttributes List<string>
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- ProjectionType string
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
- NonKeyAttributes []string
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- ProjectionType string
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
- nonKeyAttributes List<String>
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType String
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
- nonKeyAttributes string[]
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType string
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
- non_key_attributes Sequence[str]
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projection_type str
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
- nonKeyAttributes List<String>
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType String
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
ProjectionResponse, ProjectionResponseArgs    
- NonKeyAttributes List<string>
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- ProjectionType string
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
- NonKeyAttributes []string
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- ProjectionType string
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
- nonKeyAttributes List<String>
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType String
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
- nonKeyAttributes string[]
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType string
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
- non_key_attributes Sequence[str]
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projection_type str
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
- nonKeyAttributes List<String>
- Represents the non-key attribute names which will be projected into the index. For local secondary indexes, the total count of NonKeyAttributessummed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.
- projectionType String
- The set of attributes that are projected into the index: + KEYS_ONLY- Only the index and primary keys are projected into the index. +INCLUDE- In addition to the attributes described inKEYS_ONLY, the secondary index will include other non-key attributes that you specify. +ALL- All of the table attributes are projected into the index. When using the DynamoDB console,ALLis selected by default.
ProvisionedThroughput, ProvisionedThroughputArgs    
- ReadCapacityUnits int
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- WriteCapacityUnits int
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- ReadCapacityUnits int
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- WriteCapacityUnits int
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- readCapacityUnits Integer
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- writeCapacityUnits Integer
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- readCapacityUnits number
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- writeCapacityUnits number
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- read_capacity_units int
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- write_capacity_units int
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- readCapacity NumberUnits 
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
- writeCapacity NumberUnits 
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode isPAY_PER_REQUESTthe value is set to 0.
ProvisionedThroughputResponse, ProvisionedThroughputResponseArgs
- ReadCapacityUnits int
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- WriteCapacityUnits int
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- ReadCapacityUnits int
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- WriteCapacityUnits int
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- readCapacityUnits Integer
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- writeCapacityUnits Integer
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- readCapacityUnits number
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- writeCapacityUnits number
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- read_capacity_units int
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- write_capacity_units int
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- readCapacityUnits Number
- The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- writeCapacityUnits Number
- The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB Developer Guide. If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
ResourcePolicy, ResourcePolicyArgs    
- PolicyDocument object
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- PolicyDocument interface{}
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument Object
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument any
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policy_document Any
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument Any
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
ResourcePolicyResponse, ResourcePolicyResponseArgs      
- PolicyDocument object
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- PolicyDocument interface{}
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument Object
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument any
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policy_document Any
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
- policyDocument Any
- A resource-based policy document that contains permissions to add to the specified DDB table, index, or both. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples.
S3BucketSource, S3BucketSourceArgs    
- S3Bucket string
- The S3 bucket that is being imported from.
- S3BucketOwner string
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- S3KeyPrefix string
- The key prefix shared by all S3 Objects that are being imported.
- S3Bucket string
- The S3 bucket that is being imported from.
- S3BucketOwner string
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- S3KeyPrefix string
- The key prefix shared by all S3 Objects that are being imported.
- s3Bucket String
- The S3 bucket that is being imported from.
- s3BucketOwner String
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix String
- The key prefix shared by all S3 Objects that are being imported.
- s3Bucket string
- The S3 bucket that is being imported from.
- s3BucketOwner string
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix string
- The key prefix shared by all S3 Objects that are being imported.
- s3_bucket str
- The S3 bucket that is being imported from.
- s3_bucket_owner str
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3_key_prefix str
- The key prefix shared by all S3 Objects that are being imported.
- s3Bucket String
- The S3 bucket that is being imported from.
- s3BucketOwner String
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix String
- The key prefix shared by all S3 Objects that are being imported.
S3BucketSourceResponse, S3BucketSourceResponseArgs      
- S3Bucket string
- The S3 bucket that is being imported from.
- S3BucketOwner string
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- S3KeyPrefix string
- The key prefix shared by all S3 Objects that are being imported.
- S3Bucket string
- The S3 bucket that is being imported from.
- S3BucketOwner string
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- S3KeyPrefix string
- The key prefix shared by all S3 Objects that are being imported.
- s3Bucket String
- The S3 bucket that is being imported from.
- s3BucketOwner String
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix String
- The key prefix shared by all S3 Objects that are being imported.
- s3Bucket string
- The S3 bucket that is being imported from.
- s3BucketOwner string
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix string
- The key prefix shared by all S3 Objects that are being imported.
- s3_bucket str
- The S3 bucket that is being imported from.
- s3_bucket_owner str
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3_key_prefix str
- The key prefix shared by all S3 Objects that are being imported.
- s3Bucket String
- The S3 bucket that is being imported from.
- s3BucketOwner String
- The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.
- s3KeyPrefix String
- The key prefix shared by all S3 Objects that are being imported.
SSESpecification, SSESpecificationArgs
- KmsMasterKeyId string
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- SseEnabled bool
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- SseType string
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- KmsMasterKeyId string
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- SseEnabled bool
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- SseType string
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kmsMasterKeyId String
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- sseEnabled Boolean
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- sseType String
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kmsMasterKeyId string
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- sseEnabled boolean
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- sseType string
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kms_master_key_id str
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- sse_enabled bool
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- sse_type str
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kmsMasterKeyId String
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- sseEnabled Boolean
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- sseType String
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
SSESpecificationResponse, SSESpecificationResponseArgs
- KmsMasterKeyId string
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- SseEnabled bool
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- SseType string
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- KmsMasterKeyId string
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- SseEnabled bool
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- SseType string
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kmsMasterKeyId String
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- sseEnabled Boolean
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- sseType String
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kmsMasterKeyId string
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- sseEnabled boolean
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- sseType string
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kms_master_key_id str
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- sse_enabled bool
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- sse_type str
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
- kmsMasterKeyId String
- The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb.
- sseEnabled Boolean
- Indicates whether server-side encryption is done using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned key.
- sseType String
- Server-side encryption type. The only supported value is: + KMS - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
StreamSpecification, StreamSpecificationArgs    
- ResourcePolicy Pulumi.AzureNative.AwsConnector.Inputs.ResourcePolicy
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- StreamViewType string
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
- ResourcePolicy ResourcePolicy 
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- StreamViewType string
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
- resourcePolicy ResourcePolicy 
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- streamViewType String
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
- resourcePolicy ResourcePolicy 
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- streamViewType string
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
- resource_policy ResourcePolicy 
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- stream_view_type str
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
- resourcePolicy Property Map
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- streamViewType String
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
StreamSpecificationResponse, StreamSpecificationResponseArgs      
- ResourcePolicy Pulumi.AzureNative.AwsConnector.Inputs.ResourcePolicyResponse
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- StreamViewType string
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
- ResourcePolicy ResourcePolicyResponse
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- StreamViewType string
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
- resourcePolicy ResourcePolicyResponse
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- streamViewType String
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
- resourcePolicy ResourcePolicyResponse
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- streamViewType string
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
- resource_policy ResourcePolicyResponse
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- stream_view_type str
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
- resourcePolicy Property Map
- Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. Creates or updates a resource-based policy document that contains the permissions for DDB resources, such as a table, its indexes, and stream. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource. In a CFNshort template, you can provide the policy in JSON or YAML format because CFNshort converts YAML to JSON before submitting it to DDB. For more information about resource-based policies, see Using resource-based policies for and Resource-based policy examples. While defining resource-based policies in your CFNshort templates, the following considerations apply: + The maximum size supported for a resource-based policy document in JSON format is 20 KB. DDB counts whitespaces when calculating the size of a policy against this limit. + Resource-based policies don't support drift detection. If you update a policy outside of the CFNshort stack template, you'll need to update the CFNshort stack with the changes. + Resource-based policies don't support out-of-band changes. If you add, update, or delete a policy outside of the CFNshort template, the change won't be overwritten if there are no changes to the policy within the template. For example, say that your template contains a resource-based policy, which you later update outside of the template. 
If you don't make any changes to the policy in the template, the updated policy in DDB won’t be synced with the policy in the template. Conversely, say that your template doesn’t contain a resource-based policy, but you add a policy outside of the template. This policy won’t be removed from DDB as long as you don’t add it to the template. When you add a policy to the template and update the stack, the existing policy in DDB will be updated to match the one defined in the template. For a full list of all considerations, see Resource-based policy considerations.
- streamViewType String
- When an item in the table is modified, StreamViewType determines what information is written to the stream for this table. Valid values for StreamViewType are: + KEYS_ONLY - Only the key attributes of the modified item are written to the stream. + NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. + OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. + NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
SystemDataResponse, SystemDataResponseArgs      
- CreatedAt string
- The timestamp of resource creation (UTC).
- CreatedBy string
- The identity that created the resource.
- CreatedByType string
- The type of identity that created the resource.
- LastModifiedAt string
- The timestamp of resource last modification (UTC).
- LastModifiedBy string
- The identity that last modified the resource.
- LastModifiedByType string
- The type of identity that last modified the resource.
- CreatedAt string
- The timestamp of resource creation (UTC).
- CreatedBy string
- The identity that created the resource.
- CreatedByType string
- The type of identity that created the resource.
- LastModifiedAt string
- The timestamp of resource last modification (UTC).
- LastModifiedBy string
- The identity that last modified the resource.
- LastModifiedByType string
- The type of identity that last modified the resource.
- createdAt String
- The timestamp of resource creation (UTC).
- createdBy String
- The identity that created the resource.
- createdByType String
- The type of identity that created the resource.
- lastModifiedAt String
- The timestamp of resource last modification (UTC).
- lastModifiedBy String
- The identity that last modified the resource.
- lastModifiedByType String
- The type of identity that last modified the resource.
- createdAt string
- The timestamp of resource creation (UTC).
- createdBy string
- The identity that created the resource.
- createdByType string
- The type of identity that created the resource.
- lastModifiedAt string
- The timestamp of resource last modification (UTC).
- lastModifiedBy string
- The identity that last modified the resource.
- lastModifiedByType string
- The type of identity that last modified the resource.
- created_at str
- The timestamp of resource creation (UTC).
- created_by str
- The identity that created the resource.
- created_by_type str
- The type of identity that created the resource.
- last_modified_at str
- The timestamp of resource last modification (UTC).
- last_modified_by str
- The identity that last modified the resource.
- last_modified_by_type str
- The type of identity that last modified the resource.
- createdAt String
- The timestamp of resource creation (UTC).
- createdBy String
- The identity that created the resource.
- createdByType String
- The type of identity that created the resource.
- lastModifiedAt String
- The timestamp of resource last modification (UTC).
- lastModifiedBy String
- The identity that last modified the resource.
- lastModifiedByType String
- The type of identity that last modified the resource.
Tag, TagArgs  
- Key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key String
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value String
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key str
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value str
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key String
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value String
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
TagResponse, TagResponseArgs    
- Key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- Value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key String
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value String
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key string
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value string
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key str
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value str
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- key String
- The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
- value String
- The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
TimeToLiveSpecification, TimeToLiveSpecificationArgs        
- AttributeName string
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- Enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- AttributeName string
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- Enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attributeName String
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- enabled Boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attributeName string
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- enabled boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attribute_name str
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attributeName String
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- enabled Boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
TimeToLiveSpecificationResponse, TimeToLiveSpecificationResponseArgs          
- AttributeName string
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- Enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- AttributeName string
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- Enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attributeName String
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- enabled Boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attributeName string
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- enabled boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attribute_name str
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- enabled bool
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
- attributeName String
- The name of the TTL attribute used to store the expiration time for items in the table. The AttributeName property is required when enabling the TTL, or when TTL is already enabled. To update this property, you must first disable TTL and then enable TTL with the new attribute name.
- enabled Boolean
- Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
Import
An existing resource can be imported using its type token, name, and identifier, e.g.
$ pulumi import azure-native:awsconnector:DynamoDbTable wjhshaxtpxprmkvirlnkg /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AwsConnector/dynamoDBTables/{name} 
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Azure Native pulumi/pulumi-azure-native
- License
- Apache-2.0