gcp.dataplex.Datascan
Explore with Pulumi AI
A Datascan represents a user-visible job that provides insights into the related data source.
To get more information about Datascan, see:
- API documentation
- How-to Guides
Example Usage
Dataplex Datascan Basic Profile
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Minimal data-profile scan over a public BigQuery table.
const basicProfile = new gcp.dataplex.Datascan("basic_profile", {
    location: "us-central1",
    dataScanId: "dataprofile-basic",
    // Full resource name of the BigQuery table to scan.
    data: {
        resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
    },
    executionSpec: {
        trigger: {
            // Empty object selects the on-demand trigger.
            onDemand: {},
        },
    },
    // Empty spec marks this scan as a data-profile scan.
    dataProfileSpec: {},
    project: "my-project-name",
});
import pulumi
import pulumi_gcp as gcp
# Minimal data-profile scan over a public BigQuery table.
basic_profile = gcp.dataplex.Datascan("basic_profile",
    location="us-central1",
    data_scan_id="dataprofile-basic",
    # Full resource name of the BigQuery table to scan.
    data={
        "resource": "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
    },
    execution_spec={
        "trigger": {
            # Empty dict selects the on-demand trigger.
            "on_demand": {},
        },
    },
    # Empty spec marks this scan as a data-profile scan.
    data_profile_spec={},
    project="my-project-name")
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataplex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Minimal data-profile scan over a public BigQuery table.
		_, err := dataplex.NewDatascan(ctx, "basic_profile", &dataplex.DatascanArgs{
			Location:   pulumi.String("us-central1"),
			DataScanId: pulumi.String("dataprofile-basic"),
			// Full resource name of the BigQuery table to scan.
			Data: &dataplex.DatascanDataArgs{
				Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare"),
			},
			ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
				Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
					// Empty struct selects the on-demand trigger.
					OnDemand: &dataplex.DatascanExecutionSpecTriggerOnDemandArgs{},
				},
			},
			// Empty spec marks this scan as a data-profile scan.
			DataProfileSpec: &dataplex.DatascanDataProfileSpecArgs{},
			Project:         pulumi.String("my-project-name"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Minimal data-profile scan over a public BigQuery table.
    var basicProfile = new Gcp.DataPlex.Datascan("basic_profile", new()
    {
        Location = "us-central1",
        DataScanId = "dataprofile-basic",
        // Full resource name of the BigQuery table to scan.
        Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
        {
            Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        },
        ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
        {
            Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
            {
                // Must be an empty args object, not null: a null input is omitted
                // from the request, whereas the other SDK examples send `onDemand: {}`
                // to select the on-demand trigger.
                OnDemand = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerOnDemandArgs(),
            },
        },
        // Empty spec (not null) marks this scan as a data-profile scan,
        // matching the `dataProfileSpec: {}` form used by the other SDKs.
        DataProfileSpec = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecArgs(),
        Project = "my-project-name",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataplex.Datascan;
import com.pulumi.gcp.dataplex.DatascanArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerOnDemandArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Minimal data-profile scan over a public BigQuery table.
        var basicProfile = new Datascan("basicProfile", DatascanArgs.builder()
            .location("us-central1")
            .dataScanId("dataprofile-basic")
            .data(DatascanDataArgs.builder()
                .resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare")
                .build())
            .executionSpec(DatascanExecutionSpecArgs.builder()
                .trigger(DatascanExecutionSpecTriggerArgs.builder()
                    // Pass an explicitly built empty args object: the generated
                    // builder methods take an argument, so the bare `.onDemand()`
                    // call does not compile.
                    .onDemand(DatascanExecutionSpecTriggerOnDemandArgs.builder().build())
                    .build())
                .build())
            // Empty spec marks this scan as a data-profile scan.
            .dataProfileSpec(DatascanDataProfileSpecArgs.builder().build())
            .project("my-project-name")
            .build());
    }
}
resources:
  # Minimal data-profile scan over a public BigQuery table,
  # triggered on demand (empty onDemand object).
  basicProfile:
    type: gcp:dataplex:Datascan
    name: basic_profile
    properties:
      location: us-central1
      dataScanId: dataprofile-basic
      data:
        resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare
      executionSpec:
        trigger:
          onDemand: {}
      # Empty spec marks this scan as a data-profile scan.
      dataProfileSpec: {}
      project: my-project-name
Dataplex Datascan Full Profile
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// BigQuery dataset that will hold the exported profile results
// (referenced by postScanActions.bigqueryExport below).
const source = new gcp.bigquery.Dataset("source", {
    datasetId: "dataplex_dataset",
    friendlyName: "test",
    description: "This is a test description",
    location: "US",
    deleteContentsOnDestroy: true,
});
// Scheduled data-profile scan with sampling, row filtering, field
// selection, and BigQuery export of the results.
const fullProfile = new gcp.dataplex.Datascan("full_profile", {
    location: "us-central1",
    displayName: "Full Datascan Profile",
    dataScanId: "dataprofile-full",
    description: "Example resource - Full Datascan Profile",
    labels: {
        author: "billing",
    },
    data: {
        resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
    },
    executionSpec: {
        trigger: {
            // Cron schedule (daily at 01:01 America/New_York).
            schedule: {
                cron: "TZ=America/New_York 1 1 * * *",
            },
        },
    },
    dataProfileSpec: {
        // Profile 80% of rows matching the filter.
        samplingPercent: 80,
        rowFilter: "word_count > 10",
        includeFields: {
            fieldNames: ["word_count"],
        },
        excludeFields: {
            fieldNames: ["property_type"],
        },
        postScanActions: {
            // Write profile results to a table in the dataset created above.
            bigqueryExport: {
                resultsTable: "//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export",
            },
        },
    },
    project: "my-project-name",
}, {
    // Ensure the export dataset exists before the scan is created.
    dependsOn: [source],
});
import pulumi
import pulumi_gcp as gcp
# BigQuery dataset that will hold the exported profile results
# (referenced by post_scan_actions.bigquery_export below).
source = gcp.bigquery.Dataset("source",
    dataset_id="dataplex_dataset",
    friendly_name="test",
    description="This is a test description",
    location="US",
    delete_contents_on_destroy=True)
# Scheduled data-profile scan with sampling, row filtering, field
# selection, and BigQuery export of the results.
full_profile = gcp.dataplex.Datascan("full_profile",
    location="us-central1",
    display_name="Full Datascan Profile",
    data_scan_id="dataprofile-full",
    description="Example resource - Full Datascan Profile",
    labels={
        "author": "billing",
    },
    data={
        "resource": "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
    },
    execution_spec={
        "trigger": {
            # Cron schedule (daily at 01:01 America/New_York).
            "schedule": {
                "cron": "TZ=America/New_York 1 1 * * *",
            },
        },
    },
    data_profile_spec={
        # Profile 80% of rows matching the filter.
        "sampling_percent": 80,
        "row_filter": "word_count > 10",
        "include_fields": {
            "field_names": ["word_count"],
        },
        "exclude_fields": {
            "field_names": ["property_type"],
        },
        "post_scan_actions": {
            # Write profile results to a table in the dataset created above.
            "bigquery_export": {
                "results_table": "//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export",
            },
        },
    },
    project="my-project-name",
    # Ensure the export dataset exists before the scan is created.
    opts = pulumi.ResourceOptions(depends_on=[source]))
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataplex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// BigQuery dataset that will hold the exported profile results
		// (referenced by PostScanActions.BigqueryExport below).
		source, err := bigquery.NewDataset(ctx, "source", &bigquery.DatasetArgs{
			DatasetId:               pulumi.String("dataplex_dataset"),
			FriendlyName:            pulumi.String("test"),
			Description:             pulumi.String("This is a test description"),
			Location:                pulumi.String("US"),
			DeleteContentsOnDestroy: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		// Scheduled data-profile scan with sampling, row filtering, field
		// selection, and BigQuery export of the results.
		_, err = dataplex.NewDatascan(ctx, "full_profile", &dataplex.DatascanArgs{
			Location:    pulumi.String("us-central1"),
			DisplayName: pulumi.String("Full Datascan Profile"),
			DataScanId:  pulumi.String("dataprofile-full"),
			Description: pulumi.String("Example resource - Full Datascan Profile"),
			Labels: pulumi.StringMap{
				"author": pulumi.String("billing"),
			},
			Data: &dataplex.DatascanDataArgs{
				Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare"),
			},
			ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
				Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
					// Cron schedule (daily at 01:01 America/New_York).
					Schedule: &dataplex.DatascanExecutionSpecTriggerScheduleArgs{
						Cron: pulumi.String("TZ=America/New_York 1 1 * * *"),
					},
				},
			},
			DataProfileSpec: &dataplex.DatascanDataProfileSpecArgs{
				// Profile 80% of rows matching the filter.
				SamplingPercent: pulumi.Float64(80),
				RowFilter:       pulumi.String("word_count > 10"),
				IncludeFields: &dataplex.DatascanDataProfileSpecIncludeFieldsArgs{
					FieldNames: pulumi.StringArray{
						pulumi.String("word_count"),
					},
				},
				ExcludeFields: &dataplex.DatascanDataProfileSpecExcludeFieldsArgs{
					FieldNames: pulumi.StringArray{
						pulumi.String("property_type"),
					},
				},
				PostScanActions: &dataplex.DatascanDataProfileSpecPostScanActionsArgs{
					// Write profile results to a table in the dataset created above.
					BigqueryExport: &dataplex.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs{
						ResultsTable: pulumi.String("//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export"),
					},
				},
			},
			Project: pulumi.String("my-project-name"),
		}, pulumi.DependsOn([]pulumi.Resource{
			// Ensure the export dataset exists before the scan is created.
			source,
		}))
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    // BigQuery dataset that will hold the exported profile results
    // (referenced by PostScanActions.BigqueryExport below).
    var source = new Gcp.BigQuery.Dataset("source", new()
    {
        DatasetId = "dataplex_dataset",
        FriendlyName = "test",
        Description = "This is a test description",
        Location = "US",
        DeleteContentsOnDestroy = true,
    });
    // Scheduled data-profile scan with sampling, row filtering, field
    // selection, and BigQuery export of the results.
    var fullProfile = new Gcp.DataPlex.Datascan("full_profile", new()
    {
        Location = "us-central1",
        DisplayName = "Full Datascan Profile",
        DataScanId = "dataprofile-full",
        Description = "Example resource - Full Datascan Profile",
        Labels = 
        {
            { "author", "billing" },
        },
        Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
        {
            Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        },
        ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
        {
            Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
            {
                // Cron schedule (daily at 01:01 America/New_York).
                Schedule = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerScheduleArgs
                {
                    Cron = "TZ=America/New_York 1 1 * * *",
                },
            },
        },
        DataProfileSpec = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecArgs
        {
            // Profile 80% of rows matching the filter.
            SamplingPercent = 80,
            RowFilter = "word_count > 10",
            IncludeFields = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecIncludeFieldsArgs
            {
                FieldNames = new[]
                {
                    "word_count",
                },
            },
            ExcludeFields = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecExcludeFieldsArgs
            {
                FieldNames = new[]
                {
                    "property_type",
                },
            },
            PostScanActions = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecPostScanActionsArgs
            {
                // Write profile results to a table in the dataset created above.
                BigqueryExport = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs
                {
                    ResultsTable = "//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export",
                },
            },
        },
        Project = "my-project-name",
    }, new CustomResourceOptions
    {
        // Ensure the export dataset exists before the scan is created.
        DependsOn =
        {
            source,
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.dataplex.Datascan;
import com.pulumi.gcp.dataplex.DatascanArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerScheduleArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecIncludeFieldsArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecExcludeFieldsArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecPostScanActionsArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs;
import com.pulumi.resources.CustomResourceOptions;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // BigQuery dataset that will hold the exported profile results
        // (referenced by postScanActions.bigqueryExport below).
        var source = new Dataset("source", DatasetArgs.builder()
            .datasetId("dataplex_dataset")
            .friendlyName("test")
            .description("This is a test description")
            .location("US")
            .deleteContentsOnDestroy(true)
            .build());
        // Scheduled data-profile scan with sampling, row filtering, field
        // selection, and BigQuery export of the results.
        var fullProfile = new Datascan("fullProfile", DatascanArgs.builder()
            .location("us-central1")
            .displayName("Full Datascan Profile")
            .dataScanId("dataprofile-full")
            .description("Example resource - Full Datascan Profile")
            .labels(Map.of("author", "billing"))
            .data(DatascanDataArgs.builder()
                .resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare")
                .build())
            .executionSpec(DatascanExecutionSpecArgs.builder()
                .trigger(DatascanExecutionSpecTriggerArgs.builder()
                    // Cron schedule (daily at 01:01 America/New_York).
                    .schedule(DatascanExecutionSpecTriggerScheduleArgs.builder()
                        .cron("TZ=America/New_York 1 1 * * *")
                        .build())
                    .build())
                .build())
            .dataProfileSpec(DatascanDataProfileSpecArgs.builder()
                // Profile 80% of rows matching the filter.
                .samplingPercent(80)
                .rowFilter("word_count > 10")
                .includeFields(DatascanDataProfileSpecIncludeFieldsArgs.builder()
                    .fieldNames("word_count")
                    .build())
                .excludeFields(DatascanDataProfileSpecExcludeFieldsArgs.builder()
                    .fieldNames("property_type")
                    .build())
                .postScanActions(DatascanDataProfileSpecPostScanActionsArgs.builder()
                    // Write profile results to a table in the dataset created above.
                    .bigqueryExport(DatascanDataProfileSpecPostScanActionsBigqueryExportArgs.builder()
                        .resultsTable("//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export")
                        .build())
                    .build())
                .build())
            .project("my-project-name")
            .build(), CustomResourceOptions.builder()
                // Ensure the export dataset exists before the scan is created.
                .dependsOn(source)
                .build());
    }
}
resources:
  # Scheduled data-profile scan with sampling, row filtering, field
  # selection, and BigQuery export of the results.
  fullProfile:
    type: gcp:dataplex:Datascan
    name: full_profile
    properties:
      location: us-central1
      displayName: Full Datascan Profile
      dataScanId: dataprofile-full
      description: Example resource - Full Datascan Profile
      labels:
        author: billing
      data:
        resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare
      executionSpec:
        trigger:
          # Cron schedule (daily at 01:01 America/New_York).
          schedule:
            cron: TZ=America/New_York 1 1 * * *
      dataProfileSpec:
        # Profile 80% of rows matching the filter.
        samplingPercent: 80
        rowFilter: word_count > 10
        includeFields:
          fieldNames:
            - word_count
        excludeFields:
          fieldNames:
            - property_type
        postScanActions:
          # Write profile results to a table in the dataset declared below.
          bigqueryExport:
            resultsTable: //bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export
      project: my-project-name
    options:
      # Ensure the export dataset exists before the scan is created.
      dependsOn:
        - ${source}
  # BigQuery dataset that holds the exported profile results.
  source:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: dataplex_dataset
      friendlyName: test
      description: This is a test description
      location: US
      deleteContentsOnDestroy: true
Dataplex Datascan Basic Quality
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Minimal data-quality scan with a single table-level rule.
const basicQuality = new gcp.dataplex.Datascan("basic_quality", {
    location: "us-central1",
    dataScanId: "dataquality-basic",
    data: {
        resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
    },
    executionSpec: {
        trigger: {
            // Empty object selects the on-demand trigger.
            onDemand: {},
        },
    },
    dataQualitySpec: {
        rules: [{
            dimension: "VALIDITY",
            name: "rule1",
            description: "rule 1 for validity dimension",
            // Table-level SQL expectation: the table must be non-empty.
            tableConditionExpectation: {
                sqlExpression: "COUNT(*) > 0",
            },
        }],
    },
    project: "my-project-name",
});
import pulumi
import pulumi_gcp as gcp
# Minimal data-quality scan with a single table-level rule.
basic_quality = gcp.dataplex.Datascan("basic_quality",
    location="us-central1",
    data_scan_id="dataquality-basic",
    data={
        "resource": "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
    },
    execution_spec={
        "trigger": {
            # Empty dict selects the on-demand trigger.
            "on_demand": {},
        },
    },
    data_quality_spec={
        "rules": [{
            "dimension": "VALIDITY",
            "name": "rule1",
            "description": "rule 1 for validity dimension",
            # Table-level SQL expectation: the table must be non-empty.
            "table_condition_expectation": {
                "sql_expression": "COUNT(*) > 0",
            },
        }],
    },
    project="my-project-name")
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataplex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Minimal data-quality scan with a single table-level rule.
		_, err := dataplex.NewDatascan(ctx, "basic_quality", &dataplex.DatascanArgs{
			Location:   pulumi.String("us-central1"),
			DataScanId: pulumi.String("dataquality-basic"),
			Data: &dataplex.DatascanDataArgs{
				Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare"),
			},
			ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
				Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
					// Empty struct selects the on-demand trigger.
					OnDemand: &dataplex.DatascanExecutionSpecTriggerOnDemandArgs{},
				},
			},
			DataQualitySpec: &dataplex.DatascanDataQualitySpecArgs{
				Rules: dataplex.DatascanDataQualitySpecRuleArray{
					&dataplex.DatascanDataQualitySpecRuleArgs{
						Dimension:   pulumi.String("VALIDITY"),
						Name:        pulumi.String("rule1"),
						Description: pulumi.String("rule 1 for validity dimension"),
						// Table-level SQL expectation: the table must be non-empty.
						TableConditionExpectation: &dataplex.DatascanDataQualitySpecRuleTableConditionExpectationArgs{
							SqlExpression: pulumi.String("COUNT(*) > 0"),
						},
					},
				},
			},
			Project: pulumi.String("my-project-name"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() => 
{
    // Minimal data-quality scan with a single table-level rule.
    var basicQuality = new Gcp.DataPlex.Datascan("basic_quality", new()
    {
        Location = "us-central1",
        DataScanId = "dataquality-basic",
        Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
        {
            Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare",
        },
        ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
        {
            Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
            {
                // Must be an empty args object, not null: a null input is omitted
                // from the request, whereas the other SDK examples send `onDemand: {}`
                // to select the on-demand trigger.
                OnDemand = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerOnDemandArgs(),
            },
        },
        DataQualitySpec = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecArgs
        {
            Rules = new[]
            {
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Dimension = "VALIDITY",
                    Name = "rule1",
                    Description = "rule 1 for validity dimension",
                    // Table-level SQL expectation: the table must be non-empty.
                    TableConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs
                    {
                        SqlExpression = "COUNT(*) > 0",
                    },
                },
            },
        },
        Project = "my-project-name",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataplex.Datascan;
import com.pulumi.gcp.dataplex.DatascanArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerOnDemandArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecArgs;
// Rule argument types were referenced below but missing from the original imports.
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        // Minimal data-quality scan with a single table-level rule.
        var basicQuality = new Datascan("basicQuality", DatascanArgs.builder()
            .location("us-central1")
            .dataScanId("dataquality-basic")
            .data(DatascanDataArgs.builder()
                .resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare")
                .build())
            .executionSpec(DatascanExecutionSpecArgs.builder()
                .trigger(DatascanExecutionSpecTriggerArgs.builder()
                    // Pass an explicitly built empty args object: the generated
                    // builder methods take an argument, so the bare `.onDemand()`
                    // call does not compile.
                    .onDemand(DatascanExecutionSpecTriggerOnDemandArgs.builder().build())
                    .build())
                .build())
            .dataQualitySpec(DatascanDataQualitySpecArgs.builder()
                .rules(DatascanDataQualitySpecRuleArgs.builder()
                    .dimension("VALIDITY")
                    .name("rule1")
                    .description("rule 1 for validity dimension")
                    // Table-level SQL expectation: the table must be non-empty.
                    .tableConditionExpectation(DatascanDataQualitySpecRuleTableConditionExpectationArgs.builder()
                        .sqlExpression("COUNT(*) > 0")
                        .build())
                    .build())
                .build())
            .project("my-project-name")
            .build());
    }
}
resources:
  # Minimal data-quality scan with a single table-level rule,
  # triggered on demand (empty onDemand object).
  basicQuality:
    type: gcp:dataplex:Datascan
    name: basic_quality
    properties:
      location: us-central1
      dataScanId: dataquality-basic
      data:
        resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare
      executionSpec:
        trigger:
          onDemand: {}
      dataQualitySpec:
        rules:
          - dimension: VALIDITY
            name: rule1
            description: rule 1 for validity dimension
            # Table-level SQL expectation: the table must be non-empty.
            tableConditionExpectation:
              sqlExpression: COUNT(*) > 0
      project: my-project-name
Dataplex Datascan Full Quality
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
// Scheduled data-quality scan demonstrating one rule per expectation type.
const fullQuality = new gcp.dataplex.Datascan("full_quality", {
    location: "us-central1",
    displayName: "Full Datascan Quality",
    dataScanId: "dataquality-full",
    description: "Example resource - Full Datascan Quality",
    labels: {
        author: "billing",
    },
    data: {
        resource: "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations",
    },
    executionSpec: {
        trigger: {
            // Cron schedule (daily at 01:01 America/New_York).
            schedule: {
                cron: "TZ=America/New_York 1 1 * * *",
            },
        },
        // NOTE(review): presumably the timestamp column used for incremental
        // scans — confirm against the Dataplex executionSpec documentation.
        field: "modified_date",
    },
    dataQualitySpec: {
        // Evaluate rules on 5% of rows matching the filter.
        samplingPercent: 5,
        rowFilter: "station_id > 1000",
        rules: [
            // Non-null check, passing if >= 99% of values are non-null.
            {
                column: "address",
                dimension: "VALIDITY",
                threshold: 0.99,
                nonNullExpectation: {},
            },
            // Numeric range check with an exclusive lower bound.
            {
                column: "council_district",
                dimension: "VALIDITY",
                ignoreNull: true,
                threshold: 0.9,
                rangeExpectation: {
                    minValue: "1",
                    maxValue: "10",
                    strictMinEnabled: true,
                    strictMaxEnabled: false,
                },
            },
            // Regex match check.
            {
                column: "power_type",
                dimension: "VALIDITY",
                ignoreNull: false,
                regexExpectation: {
                    regex: ".*solar.*",
                },
            },
            // Membership-in-set check.
            {
                column: "property_type",
                dimension: "VALIDITY",
                ignoreNull: false,
                setExpectation: {
                    values: [
                        "sidewalk",
                        "parkland",
                    ],
                },
            },
            // Uniqueness check.
            {
                column: "address",
                dimension: "UNIQUENESS",
                uniquenessExpectation: {},
            },
            // Aggregate-statistic range check (exclusive bounds on the mean).
            {
                column: "number_of_docks",
                dimension: "VALIDITY",
                statisticRangeExpectation: {
                    statistic: "MEAN",
                    minValue: "5",
                    maxValue: "15",
                    strictMinEnabled: true,
                    strictMaxEnabled: true,
                },
            },
            // Per-row SQL expression check.
            {
                column: "footprint_length",
                dimension: "VALIDITY",
                rowConditionExpectation: {
                    sqlExpression: "footprint_length > 0 AND footprint_length <= 10",
                },
            },
            // Table-level SQL expression check.
            {
                dimension: "VALIDITY",
                tableConditionExpectation: {
                    sqlExpression: "COUNT(*) > 0",
                },
            },
            // SQL assertion: the statement should return no rows.
            {
                dimension: "VALIDITY",
                sqlAssertion: {
                    sqlStatement: "select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null",
                },
            },
        ],
    },
    project: "my-project-name",
});
import pulumi
import pulumi_gcp as gcp
# Scheduled data-quality scan demonstrating one rule per expectation type.
full_quality = gcp.dataplex.Datascan("full_quality",
    location="us-central1",
    display_name="Full Datascan Quality",
    data_scan_id="dataquality-full",
    description="Example resource - Full Datascan Quality",
    labels={
        "author": "billing",
    },
    data={
        "resource": "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations",
    },
    execution_spec={
        "trigger": {
            # Cron schedule (daily at 01:01 America/New_York).
            "schedule": {
                "cron": "TZ=America/New_York 1 1 * * *",
            },
        },
        # NOTE(review): presumably the timestamp column used for incremental
        # scans — confirm against the Dataplex executionSpec documentation.
        "field": "modified_date",
    },
    data_quality_spec={
        # Evaluate rules on 5% of rows matching the filter.
        "sampling_percent": 5,
        "row_filter": "station_id > 1000",
        "rules": [
            # Non-null check, passing if >= 99% of values are non-null.
            {
                "column": "address",
                "dimension": "VALIDITY",
                "threshold": 0.99,
                "non_null_expectation": {},
            },
            # Numeric range check with an exclusive lower bound.
            {
                "column": "council_district",
                "dimension": "VALIDITY",
                "ignore_null": True,
                "threshold": 0.9,
                "range_expectation": {
                    "min_value": "1",
                    "max_value": "10",
                    "strict_min_enabled": True,
                    "strict_max_enabled": False,
                },
            },
            # Regex match check.
            {
                "column": "power_type",
                "dimension": "VALIDITY",
                "ignore_null": False,
                "regex_expectation": {
                    "regex": ".*solar.*",
                },
            },
            # Membership-in-set check.
            {
                "column": "property_type",
                "dimension": "VALIDITY",
                "ignore_null": False,
                "set_expectation": {
                    "values": [
                        "sidewalk",
                        "parkland",
                    ],
                },
            },
            # Uniqueness check.
            {
                "column": "address",
                "dimension": "UNIQUENESS",
                "uniqueness_expectation": {},
            },
            # Aggregate-statistic range check (exclusive bounds on the mean).
            {
                "column": "number_of_docks",
                "dimension": "VALIDITY",
                "statistic_range_expectation": {
                    "statistic": "MEAN",
                    "min_value": "5",
                    "max_value": "15",
                    "strict_min_enabled": True,
                    "strict_max_enabled": True,
                },
            },
            # Per-row SQL expression check.
            {
                "column": "footprint_length",
                "dimension": "VALIDITY",
                "row_condition_expectation": {
                    "sql_expression": "footprint_length > 0 AND footprint_length <= 10",
                },
            },
            # Table-level SQL expression check.
            {
                "dimension": "VALIDITY",
                "table_condition_expectation": {
                    "sql_expression": "COUNT(*) > 0",
                },
            },
            # SQL assertion: the statement should return no rows.
            {
                "dimension": "VALIDITY",
                "sql_assertion": {
                    "sql_statement": "select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null",
                },
            },
        ],
    },
    project="my-project-name")
// Example: a Dataplex data-quality scan covering every rule type
// (non-null, range, regex, set, uniqueness, statistic range,
// row condition, table condition, and SQL assertion).
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataplex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := dataplex.NewDatascan(ctx, "full_quality", &dataplex.DatascanArgs{
			Location:    pulumi.String("us-central1"),
			DisplayName: pulumi.String("Full Datascan Quality"),
			DataScanId:  pulumi.String("dataquality-full"),
			Description: pulumi.String("Example resource - Full Datascan Quality"),
			Labels: pulumi.StringMap{
				"author": pulumi.String("billing"),
			},
			// Scanned source: a public BigQuery table.
			Data: &dataplex.DatascanDataArgs{
				Resource: pulumi.String("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations"),
			},
			// Scheduled trigger (cron with an explicit time zone);
			// Field names the column used for incremental scans.
			ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
				Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
					Schedule: &dataplex.DatascanExecutionSpecTriggerScheduleArgs{
						Cron: pulumi.String("TZ=America/New_York 1 1 * * *"),
					},
				},
				Field: pulumi.String("modified_date"),
			},
			DataQualitySpec: &dataplex.DatascanDataQualitySpecArgs{
				// Sample 5% of the rows selected by RowFilter.
				SamplingPercent: pulumi.Float64(5),
				RowFilter:       pulumi.String("station_id > 1000"),
				Rules: dataplex.DatascanDataQualitySpecRuleArray{
					// Non-null check on address with a 0.99 passing threshold.
					&dataplex.DatascanDataQualitySpecRuleArgs{
						Column:             pulumi.String("address"),
						Dimension:          pulumi.String("VALIDITY"),
						Threshold:          pulumi.Float64(0.99),
						NonNullExpectation: &dataplex.DatascanDataQualitySpecRuleNonNullExpectationArgs{},
					},
					// Range check on council_district; nulls are ignored.
					// Note min/max values are passed as strings.
					&dataplex.DatascanDataQualitySpecRuleArgs{
						Column:     pulumi.String("council_district"),
						Dimension:  pulumi.String("VALIDITY"),
						IgnoreNull: pulumi.Bool(true),
						Threshold:  pulumi.Float64(0.9),
						RangeExpectation: &dataplex.DatascanDataQualitySpecRuleRangeExpectationArgs{
							MinValue:         pulumi.String("1"),
							MaxValue:         pulumi.String("10"),
							StrictMinEnabled: pulumi.Bool(true),
							StrictMaxEnabled: pulumi.Bool(false),
						},
					},
					// Regex match on power_type.
					&dataplex.DatascanDataQualitySpecRuleArgs{
						Column:     pulumi.String("power_type"),
						Dimension:  pulumi.String("VALIDITY"),
						IgnoreNull: pulumi.Bool(false),
						RegexExpectation: &dataplex.DatascanDataQualitySpecRuleRegexExpectationArgs{
							Regex: pulumi.String(".*solar.*"),
						},
					},
					// Membership check on property_type.
					&dataplex.DatascanDataQualitySpecRuleArgs{
						Column:     pulumi.String("property_type"),
						Dimension:  pulumi.String("VALIDITY"),
						IgnoreNull: pulumi.Bool(false),
						SetExpectation: &dataplex.DatascanDataQualitySpecRuleSetExpectationArgs{
							Values: pulumi.StringArray{
								pulumi.String("sidewalk"),
								pulumi.String("parkland"),
							},
						},
					},
					// Uniqueness check on address.
					&dataplex.DatascanDataQualitySpecRuleArgs{
						Column:                pulumi.String("address"),
						Dimension:             pulumi.String("UNIQUENESS"),
						UniquenessExpectation: &dataplex.DatascanDataQualitySpecRuleUniquenessExpectationArgs{},
					},
					// Statistic (MEAN) range check on number_of_docks.
					&dataplex.DatascanDataQualitySpecRuleArgs{
						Column:    pulumi.String("number_of_docks"),
						Dimension: pulumi.String("VALIDITY"),
						StatisticRangeExpectation: &dataplex.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs{
							Statistic:        pulumi.String("MEAN"),
							MinValue:         pulumi.String("5"),
							MaxValue:         pulumi.String("15"),
							StrictMinEnabled: pulumi.Bool(true),
							StrictMaxEnabled: pulumi.Bool(true),
						},
					},
					// Per-row SQL predicate.
					&dataplex.DatascanDataQualitySpecRuleArgs{
						Column:    pulumi.String("footprint_length"),
						Dimension: pulumi.String("VALIDITY"),
						RowConditionExpectation: &dataplex.DatascanDataQualitySpecRuleRowConditionExpectationArgs{
							SqlExpression: pulumi.String("footprint_length > 0 AND footprint_length <= 10"),
						},
					},
					// Whole-table SQL predicate (no Column set).
					&dataplex.DatascanDataQualitySpecRuleArgs{
						Dimension: pulumi.String("VALIDITY"),
						TableConditionExpectation: &dataplex.DatascanDataQualitySpecRuleTableConditionExpectationArgs{
							SqlExpression: pulumi.String("COUNT(*) > 0"),
						},
					},
					// SQL assertion: a statement selecting rows with a null station_id.
					&dataplex.DatascanDataQualitySpecRuleArgs{
						Dimension: pulumi.String("VALIDITY"),
						SqlAssertion: &dataplex.DatascanDataQualitySpecRuleSqlAssertionArgs{
							SqlStatement: pulumi.String("select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null"),
						},
					},
				},
			},
			Project: pulumi.String("my-project-name"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: a Dataplex data-quality scan covering every rule type
// (non-null, range, regex, set, uniqueness, statistic range,
// row condition, table condition, and SQL assertion).
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var fullQuality = new Gcp.DataPlex.Datascan("full_quality", new()
    {
        Location = "us-central1",
        DisplayName = "Full Datascan Quality",
        DataScanId = "dataquality-full",
        Description = "Example resource - Full Datascan Quality",
        Labels = 
        {
            { "author", "billing" },
        },
        // Scanned source: a public BigQuery table.
        Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
        {
            Resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations",
        },
        // Scheduled trigger; Field names the column used for incremental scans.
        ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
        {
            Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
            {
                Schedule = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerScheduleArgs
                {
                    Cron = "TZ=America/New_York 1 1 * * *",
                },
            },
            Field = "modified_date",
        },
        DataQualitySpec = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecArgs
        {
            // Sample 5% of the rows selected by RowFilter.
            SamplingPercent = 5,
            RowFilter = "station_id > 1000",
            Rules = new[]
            {
                // Non-null check on address with a 0.99 passing threshold.
                // NOTE(review): null here stands in for an empty
                // NonNullExpectation in this generated listing — confirm it
                // serializes as an empty expectation object.
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "address",
                    Dimension = "VALIDITY",
                    Threshold = 0.99,
                    NonNullExpectation = null,
                },
                // Range check on council_district; nulls are ignored.
                // Min/max values are passed as strings.
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "council_district",
                    Dimension = "VALIDITY",
                    IgnoreNull = true,
                    Threshold = 0.9,
                    RangeExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRangeExpectationArgs
                    {
                        MinValue = "1",
                        MaxValue = "10",
                        StrictMinEnabled = true,
                        StrictMaxEnabled = false,
                    },
                },
                // Regex match on power_type.
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "power_type",
                    Dimension = "VALIDITY",
                    IgnoreNull = false,
                    RegexExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRegexExpectationArgs
                    {
                        Regex = ".*solar.*",
                    },
                },
                // Membership check on property_type.
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "property_type",
                    Dimension = "VALIDITY",
                    IgnoreNull = false,
                    SetExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleSetExpectationArgs
                    {
                        Values = new[]
                        {
                            "sidewalk",
                            "parkland",
                        },
                    },
                },
                // Uniqueness check on address (null = empty expectation; see note above).
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "address",
                    Dimension = "UNIQUENESS",
                    UniquenessExpectation = null,
                },
                // Statistic (MEAN) range check on number_of_docks.
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "number_of_docks",
                    Dimension = "VALIDITY",
                    StatisticRangeExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs
                    {
                        Statistic = "MEAN",
                        MinValue = "5",
                        MaxValue = "15",
                        StrictMinEnabled = true,
                        StrictMaxEnabled = true,
                    },
                },
                // Per-row SQL predicate.
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Column = "footprint_length",
                    Dimension = "VALIDITY",
                    RowConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRowConditionExpectationArgs
                    {
                        SqlExpression = "footprint_length > 0 AND footprint_length <= 10",
                    },
                },
                // Whole-table SQL predicate (no Column set).
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Dimension = "VALIDITY",
                    TableConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs
                    {
                        SqlExpression = "COUNT(*) > 0",
                    },
                },
                // SQL assertion: a statement selecting rows with a null station_id.
                new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
                {
                    Dimension = "VALIDITY",
                    SqlAssertion = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleSqlAssertionArgs
                    {
                        SqlStatement = "select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null",
                    },
                },
            },
        },
        Project = "my-project-name",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.dataplex.Datascan;
import com.pulumi.gcp.dataplex.DatascanArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanExecutionSpecTriggerScheduleArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecArgs;
// Rule argument types used below; without these imports the example does not compile.
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleNonNullExpectationArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleRangeExpectationArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleRegexExpectationArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleSetExpectationArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleUniquenessExpectationArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleRowConditionExpectationArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs;
import com.pulumi.gcp.dataplex.inputs.DatascanDataQualitySpecRuleSqlAssertionArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
/**
 * Example: a Dataplex data-quality scan covering every rule type
 * (non-null, range, regex, set, uniqueness, statistic range,
 * row condition, table condition, and SQL assertion).
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var fullQuality = new Datascan("fullQuality", DatascanArgs.builder()
            .location("us-central1")
            .displayName("Full Datascan Quality")
            .dataScanId("dataquality-full")
            .description("Example resource - Full Datascan Quality")
            .labels(Map.of("author", "billing"))
            // Scanned source: a public BigQuery table.
            .data(DatascanDataArgs.builder()
                .resource("//bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations")
                .build())
            // Scheduled trigger; field names the column used for incremental scans.
            .executionSpec(DatascanExecutionSpecArgs.builder()
                .trigger(DatascanExecutionSpecTriggerArgs.builder()
                    .schedule(DatascanExecutionSpecTriggerScheduleArgs.builder()
                        .cron("TZ=America/New_York 1 1 * * *")
                        .build())
                    .build())
                .field("modified_date")
                .build())
            .dataQualitySpec(DatascanDataQualitySpecArgs.builder()
                // Sample 5% of the rows selected by rowFilter. Use a double
                // literal: an int literal does not box to Double in Java.
                .samplingPercent(5.0)
                .rowFilter("station_id > 1000")
                .rules(
                    // Non-null check on address with a 0.99 passing threshold.
                    // The builder takes an (empty) args object; a bare
                    // .nonNullExpectation() call has no such overload.
                    DatascanDataQualitySpecRuleArgs.builder()
                        .column("address")
                        .dimension("VALIDITY")
                        .threshold(0.99)
                        .nonNullExpectation(DatascanDataQualitySpecRuleNonNullExpectationArgs.builder().build())
                        .build(),
                    // Range check on council_district; nulls are ignored.
                    // Min/max values are strings, matching the other SDKs.
                    DatascanDataQualitySpecRuleArgs.builder()
                        .column("council_district")
                        .dimension("VALIDITY")
                        .ignoreNull(true)
                        .threshold(0.9)
                        .rangeExpectation(DatascanDataQualitySpecRuleRangeExpectationArgs.builder()
                            .minValue("1")
                            .maxValue("10")
                            .strictMinEnabled(true)
                            .strictMaxEnabled(false)
                            .build())
                        .build(),
                    // Regex match on power_type.
                    DatascanDataQualitySpecRuleArgs.builder()
                        .column("power_type")
                        .dimension("VALIDITY")
                        .ignoreNull(false)
                        .regexExpectation(DatascanDataQualitySpecRuleRegexExpectationArgs.builder()
                            .regex(".*solar.*")
                            .build())
                        .build(),
                    // Membership check on property_type.
                    DatascanDataQualitySpecRuleArgs.builder()
                        .column("property_type")
                        .dimension("VALIDITY")
                        .ignoreNull(false)
                        .setExpectation(DatascanDataQualitySpecRuleSetExpectationArgs.builder()
                            .values(
                                "sidewalk",
                                "parkland")
                            .build())
                        .build(),
                    // Uniqueness check on address (empty args object required).
                    DatascanDataQualitySpecRuleArgs.builder()
                        .column("address")
                        .dimension("UNIQUENESS")
                        .uniquenessExpectation(DatascanDataQualitySpecRuleUniquenessExpectationArgs.builder().build())
                        .build(),
                    // Statistic (MEAN) range check on number_of_docks.
                    DatascanDataQualitySpecRuleArgs.builder()
                        .column("number_of_docks")
                        .dimension("VALIDITY")
                        .statisticRangeExpectation(DatascanDataQualitySpecRuleStatisticRangeExpectationArgs.builder()
                            .statistic("MEAN")
                            .minValue("5")
                            .maxValue("15")
                            .strictMinEnabled(true)
                            .strictMaxEnabled(true)
                            .build())
                        .build(),
                    // Per-row SQL predicate.
                    DatascanDataQualitySpecRuleArgs.builder()
                        .column("footprint_length")
                        .dimension("VALIDITY")
                        .rowConditionExpectation(DatascanDataQualitySpecRuleRowConditionExpectationArgs.builder()
                            .sqlExpression("footprint_length > 0 AND footprint_length <= 10")
                            .build())
                        .build(),
                    // Whole-table SQL predicate (no column set).
                    DatascanDataQualitySpecRuleArgs.builder()
                        .dimension("VALIDITY")
                        .tableConditionExpectation(DatascanDataQualitySpecRuleTableConditionExpectationArgs.builder()
                            .sqlExpression("COUNT(*) > 0")
                            .build())
                        .build(),
                    // SQL assertion: a statement selecting rows with a null station_id.
                    DatascanDataQualitySpecRuleArgs.builder()
                        .dimension("VALIDITY")
                        .sqlAssertion(DatascanDataQualitySpecRuleSqlAssertionArgs.builder()
                            .sqlStatement("select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null")
                            .build())
                        .build())
                .build())
            .project("my-project-name")
            .build());
    }
}
# Example: a Dataplex data-quality scan covering every rule type
# (non-null, range, regex, set, uniqueness, statistic range,
# row condition, table condition, and SQL assertion).
resources:
  fullQuality:
    type: gcp:dataplex:Datascan
    name: full_quality
    properties:
      location: us-central1
      displayName: Full Datascan Quality
      dataScanId: dataquality-full
      description: Example resource - Full Datascan Quality
      labels:
        author: billing
      # Scanned source: a public BigQuery table.
      data:
        resource: //bigquery.googleapis.com/projects/bigquery-public-data/datasets/austin_bikeshare/tables/bikeshare_stations
      # Scheduled trigger; field names the column used for incremental scans.
      executionSpec:
        trigger:
          schedule:
            cron: TZ=America/New_York 1 1 * * *
        field: modified_date
      dataQualitySpec:
        # Sample 5% of the rows selected by rowFilter.
        samplingPercent: 5
        rowFilter: station_id > 1000
        rules:
          # Non-null check on address with a 0.99 passing threshold.
          - column: address
            dimension: VALIDITY
            threshold: 0.99
            nonNullExpectation: {}
          # Range check on council_district; nulls are ignored.
          # NOTE(review): the Go/C# listings pass minValue/maxValue as
          # strings ("1"/"10") — confirm YAML numeric literals coerce.
          - column: council_district
            dimension: VALIDITY
            ignoreNull: true
            threshold: 0.9
            rangeExpectation:
              minValue: 1
              maxValue: 10
              strictMinEnabled: true
              strictMaxEnabled: false
          # Regex match on power_type.
          - column: power_type
            dimension: VALIDITY
            ignoreNull: false
            regexExpectation:
              regex: .*solar.*
          # Membership check on property_type.
          - column: property_type
            dimension: VALIDITY
            ignoreNull: false
            setExpectation:
              values:
                - sidewalk
                - parkland
          # Uniqueness check on address.
          - column: address
            dimension: UNIQUENESS
            uniquenessExpectation: {}
          # Statistic (MEAN) range check on number_of_docks.
          - column: number_of_docks
            dimension: VALIDITY
            statisticRangeExpectation:
              statistic: MEAN
              minValue: 5
              maxValue: 15
              strictMinEnabled: true
              strictMaxEnabled: true
          # Per-row SQL predicate.
          - column: footprint_length
            dimension: VALIDITY
            rowConditionExpectation:
              sqlExpression: footprint_length > 0 AND footprint_length <= 10
          # Whole-table SQL predicate (no column set).
          - dimension: VALIDITY
            tableConditionExpectation:
              sqlExpression: COUNT(*) > 0
          # SQL assertion: a statement selecting rows with a null station_id.
          - dimension: VALIDITY
            sqlAssertion:
              sqlStatement: select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null
      project: my-project-name
Create Datascan Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Datascan(name: string, args: DatascanArgs, opts?: CustomResourceOptions);
@overload
def Datascan(resource_name: str,
             args: DatascanArgs,
             opts: Optional[ResourceOptions] = None)
@overload
def Datascan(resource_name: str,
             opts: Optional[ResourceOptions] = None,
             data: Optional[DatascanDataArgs] = None,
             data_scan_id: Optional[str] = None,
             execution_spec: Optional[DatascanExecutionSpecArgs] = None,
             location: Optional[str] = None,
             data_profile_spec: Optional[DatascanDataProfileSpecArgs] = None,
             data_quality_spec: Optional[DatascanDataQualitySpecArgs] = None,
             description: Optional[str] = None,
             display_name: Optional[str] = None,
             labels: Optional[Mapping[str, str]] = None,
             project: Optional[str] = None)
func NewDatascan(ctx *Context, name string, args DatascanArgs, opts ...ResourceOption) (*Datascan, error)
public Datascan(string name, DatascanArgs args, CustomResourceOptions? opts = null)
public Datascan(String name, DatascanArgs args)
public Datascan(String name, DatascanArgs args, CustomResourceOptions options)
type: gcp:dataplex:Datascan
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args DatascanArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args DatascanArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args DatascanArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args DatascanArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args DatascanArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
// Reference example: every Datascan input property with placeholder values.
// NOTE(review): null-valued members (OnDemand, NonNullExpectation,
// UniquenessExpectation) stand in for empty marker objects in this generated
// listing — confirm null serializes as an empty object before copying.
// NOTE(review): Trigger shows both OnDemand and Schedule only because this
// is a placeholder listing; a real trigger configures one of them.
var datascanResource = new Gcp.DataPlex.Datascan("datascanResource", new()
{
    Data = new Gcp.DataPlex.Inputs.DatascanDataArgs
    {
        Entity = "string",
        Resource = "string",
    },
    DataScanId = "string",
    ExecutionSpec = new Gcp.DataPlex.Inputs.DatascanExecutionSpecArgs
    {
        Trigger = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerArgs
        {
            OnDemand = null,
            Schedule = new Gcp.DataPlex.Inputs.DatascanExecutionSpecTriggerScheduleArgs
            {
                Cron = "string",
            },
        },
        Field = "string",
    },
    Location = "string",
    // Profile-scan settings (column include/exclude lists, BigQuery export).
    DataProfileSpec = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecArgs
    {
        ExcludeFields = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecExcludeFieldsArgs
        {
            FieldNames = new[]
            {
                "string",
            },
        },
        IncludeFields = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecIncludeFieldsArgs
        {
            FieldNames = new[]
            {
                "string",
            },
        },
        PostScanActions = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecPostScanActionsArgs
        {
            BigqueryExport = new Gcp.DataPlex.Inputs.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs
            {
                ResultsTable = "string",
            },
        },
        RowFilter = "string",
        SamplingPercent = 0,
    },
    // Quality-scan settings: one rule showing every expectation field.
    DataQualitySpec = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecArgs
    {
        PostScanActions = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecPostScanActionsArgs
        {
            BigqueryExport = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecPostScanActionsBigqueryExportArgs
            {
                ResultsTable = "string",
            },
        },
        RowFilter = "string",
        Rules = new[]
        {
            new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleArgs
            {
                Dimension = "string",
                RangeExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRangeExpectationArgs
                {
                    MaxValue = "string",
                    MinValue = "string",
                    StrictMaxEnabled = false,
                    StrictMinEnabled = false,
                },
                RowConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRowConditionExpectationArgs
                {
                    SqlExpression = "string",
                },
                IgnoreNull = false,
                Name = "string",
                NonNullExpectation = null,
                Column = "string",
                RegexExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleRegexExpectationArgs
                {
                    Regex = "string",
                },
                Description = "string",
                SetExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleSetExpectationArgs
                {
                    Values = new[]
                    {
                        "string",
                    },
                },
                SqlAssertion = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleSqlAssertionArgs
                {
                    SqlStatement = "string",
                },
                StatisticRangeExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs
                {
                    Statistic = "string",
                    MaxValue = "string",
                    MinValue = "string",
                    StrictMaxEnabled = false,
                    StrictMinEnabled = false,
                },
                TableConditionExpectation = new Gcp.DataPlex.Inputs.DatascanDataQualitySpecRuleTableConditionExpectationArgs
                {
                    SqlExpression = "string",
                },
                Threshold = 0,
                UniquenessExpectation = null,
            },
        },
        SamplingPercent = 0,
    },
    Description = "string",
    DisplayName = "string",
    Labels = 
    {
        { "string", "string" },
    },
    Project = "string",
});
// Reference example: every Datascan input property with placeholder values.
// NOTE(review): Trigger shows both OnDemand and Schedule only because this
// is a placeholder listing; a real trigger configures one of them.
example, err := dataplex.NewDatascan(ctx, "datascanResource", &dataplex.DatascanArgs{
	Data: &dataplex.DatascanDataArgs{
		Entity:   pulumi.String("string"),
		Resource: pulumi.String("string"),
	},
	DataScanId: pulumi.String("string"),
	ExecutionSpec: &dataplex.DatascanExecutionSpecArgs{
		Trigger: &dataplex.DatascanExecutionSpecTriggerArgs{
			OnDemand: &dataplex.DatascanExecutionSpecTriggerOnDemandArgs{},
			Schedule: &dataplex.DatascanExecutionSpecTriggerScheduleArgs{
				Cron: pulumi.String("string"),
			},
		},
		Field: pulumi.String("string"),
	},
	Location: pulumi.String("string"),
	// Profile-scan settings (column include/exclude lists, BigQuery export).
	DataProfileSpec: &dataplex.DatascanDataProfileSpecArgs{
		ExcludeFields: &dataplex.DatascanDataProfileSpecExcludeFieldsArgs{
			FieldNames: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
		IncludeFields: &dataplex.DatascanDataProfileSpecIncludeFieldsArgs{
			FieldNames: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
		PostScanActions: &dataplex.DatascanDataProfileSpecPostScanActionsArgs{
			BigqueryExport: &dataplex.DatascanDataProfileSpecPostScanActionsBigqueryExportArgs{
				ResultsTable: pulumi.String("string"),
			},
		},
		RowFilter:       pulumi.String("string"),
		SamplingPercent: pulumi.Float64(0),
	},
	// Quality-scan settings: one rule showing every expectation field.
	DataQualitySpec: &dataplex.DatascanDataQualitySpecArgs{
		PostScanActions: &dataplex.DatascanDataQualitySpecPostScanActionsArgs{
			BigqueryExport: &dataplex.DatascanDataQualitySpecPostScanActionsBigqueryExportArgs{
				ResultsTable: pulumi.String("string"),
			},
		},
		RowFilter: pulumi.String("string"),
		Rules: dataplex.DatascanDataQualitySpecRuleArray{
			&dataplex.DatascanDataQualitySpecRuleArgs{
				Dimension: pulumi.String("string"),
				RangeExpectation: &dataplex.DatascanDataQualitySpecRuleRangeExpectationArgs{
					MaxValue:         pulumi.String("string"),
					MinValue:         pulumi.String("string"),
					StrictMaxEnabled: pulumi.Bool(false),
					StrictMinEnabled: pulumi.Bool(false),
				},
				RowConditionExpectation: &dataplex.DatascanDataQualitySpecRuleRowConditionExpectationArgs{
					SqlExpression: pulumi.String("string"),
				},
				IgnoreNull:         pulumi.Bool(false),
				Name:               pulumi.String("string"),
				NonNullExpectation: &dataplex.DatascanDataQualitySpecRuleNonNullExpectationArgs{},
				Column:             pulumi.String("string"),
				RegexExpectation: &dataplex.DatascanDataQualitySpecRuleRegexExpectationArgs{
					Regex: pulumi.String("string"),
				},
				Description: pulumi.String("string"),
				SetExpectation: &dataplex.DatascanDataQualitySpecRuleSetExpectationArgs{
					Values: pulumi.StringArray{
						pulumi.String("string"),
					},
				},
				SqlAssertion: &dataplex.DatascanDataQualitySpecRuleSqlAssertionArgs{
					SqlStatement: pulumi.String("string"),
				},
				StatisticRangeExpectation: &dataplex.DatascanDataQualitySpecRuleStatisticRangeExpectationArgs{
					Statistic:        pulumi.String("string"),
					MaxValue:         pulumi.String("string"),
					MinValue:         pulumi.String("string"),
					StrictMaxEnabled: pulumi.Bool(false),
					StrictMinEnabled: pulumi.Bool(false),
				},
				TableConditionExpectation: &dataplex.DatascanDataQualitySpecRuleTableConditionExpectationArgs{
					SqlExpression: pulumi.String("string"),
				},
				Threshold:             pulumi.Float64(0),
				UniquenessExpectation: &dataplex.DatascanDataQualitySpecRuleUniquenessExpectationArgs{},
			},
		},
		SamplingPercent: pulumi.Float64(0),
	},
	Description: pulumi.String("string"),
	DisplayName: pulumi.String("string"),
	Labels: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Project: pulumi.String("string"),
})
// Example: a Datascan resource with every supported input property set to a
// placeholder value ("string", 0, false). Replace placeholders with real values.
var datascanResource = new Datascan("datascanResource", DatascanArgs.builder()
    // The data source for the DataScan (a Dataplex entity or a resource path).
    .data(DatascanDataArgs.builder()
        .entity("string")
        .resource("string")
        .build())
    // DataScan identifier: lowercase letters, numbers and hyphens only;
    // must start with a letter and end with a letter or number.
    .dataScanId("string")
    // Execution settings: trigger is either on-demand or a cron schedule.
    .executionSpec(DatascanExecutionSpecArgs.builder()
        .trigger(DatascanExecutionSpecTriggerArgs.builder()
            .onDemand()
            .schedule(DatascanExecutionSpecTriggerScheduleArgs.builder()
                .cron("string")
                .build())
            .build())
        .field("string")
        .build())
    // The location where the data scan should reside.
    .location("string")
    // DataProfileScan related settings.
    .dataProfileSpec(DatascanDataProfileSpecArgs.builder()
        .excludeFields(DatascanDataProfileSpecExcludeFieldsArgs.builder()
            .fieldNames("string")
            .build())
        .includeFields(DatascanDataProfileSpecIncludeFieldsArgs.builder()
            .fieldNames("string")
            .build())
        .postScanActions(DatascanDataProfileSpecPostScanActionsArgs.builder()
            .bigqueryExport(DatascanDataProfileSpecPostScanActionsBigqueryExportArgs.builder()
                .resultsTable("string")
                .build())
            .build())
        .rowFilter("string")
        .samplingPercent(0)
        .build())
    // DataQualityScan related settings; each rule carries exactly one expectation.
    .dataQualitySpec(DatascanDataQualitySpecArgs.builder()
        .postScanActions(DatascanDataQualitySpecPostScanActionsArgs.builder()
            .bigqueryExport(DatascanDataQualitySpecPostScanActionsBigqueryExportArgs.builder()
                .resultsTable("string")
                .build())
            .build())
        .rowFilter("string")
        .rules(DatascanDataQualitySpecRuleArgs.builder()
            .dimension("string")
            .rangeExpectation(DatascanDataQualitySpecRuleRangeExpectationArgs.builder()
                .maxValue("string")
                .minValue("string")
                .strictMaxEnabled(false)
                .strictMinEnabled(false)
                .build())
            .rowConditionExpectation(DatascanDataQualitySpecRuleRowConditionExpectationArgs.builder()
                .sqlExpression("string")
                .build())
            .ignoreNull(false)
            .name("string")
            .nonNullExpectation()
            .column("string")
            .regexExpectation(DatascanDataQualitySpecRuleRegexExpectationArgs.builder()
                .regex("string")
                .build())
            .description("string")
            .setExpectation(DatascanDataQualitySpecRuleSetExpectationArgs.builder()
                .values("string")
                .build())
            .sqlAssertion(DatascanDataQualitySpecRuleSqlAssertionArgs.builder()
                .sqlStatement("string")
                .build())
            .statisticRangeExpectation(DatascanDataQualitySpecRuleStatisticRangeExpectationArgs.builder()
                .statistic("string")
                .maxValue("string")
                .minValue("string")
                .strictMaxEnabled(false)
                .strictMinEnabled(false)
                .build())
            .tableConditionExpectation(DatascanDataQualitySpecRuleTableConditionExpectationArgs.builder()
                .sqlExpression("string")
                .build())
            .threshold(0)
            .uniquenessExpectation()
            .build())
        .samplingPercent(0)
        .build())
    // Free-form description and display name for the scan.
    .description("string")
    .displayName("string")
    // Non-authoritative labels; see 'effectiveLabels' for all labels on the resource.
    .labels(Map.of("string", "string"))
    .project("string")
    .build());
# Example: a Datascan resource with every supported input property set to a
# placeholder value ("string", 0, False). Replace placeholders with real values.
datascan_resource = gcp.dataplex.Datascan("datascanResource",
    # The data source for the DataScan (a Dataplex entity or a resource path).
    data={
        "entity": "string",
        "resource": "string",
    },
    # DataScan identifier: lowercase letters, numbers and hyphens only;
    # must start with a letter and end with a letter or number.
    data_scan_id="string",
    # Execution settings: trigger is either on-demand or a cron schedule.
    execution_spec={
        "trigger": {
            "on_demand": {},
            "schedule": {
                "cron": "string",
            },
        },
        "field": "string",
    },
    # The location where the data scan should reside.
    location="string",
    # DataProfileScan related settings.
    data_profile_spec={
        "exclude_fields": {
            "field_names": ["string"],
        },
        "include_fields": {
            "field_names": ["string"],
        },
        "post_scan_actions": {
            "bigquery_export": {
                "results_table": "string",
            },
        },
        "row_filter": "string",
        "sampling_percent": 0,
    },
    # DataQualityScan related settings; each rule carries exactly one expectation.
    data_quality_spec={
        "post_scan_actions": {
            "bigquery_export": {
                "results_table": "string",
            },
        },
        "row_filter": "string",
        "rules": [{
            "dimension": "string",
            "range_expectation": {
                "max_value": "string",
                "min_value": "string",
                "strict_max_enabled": False,
                "strict_min_enabled": False,
            },
            "row_condition_expectation": {
                "sql_expression": "string",
            },
            "ignore_null": False,
            "name": "string",
            "non_null_expectation": {},
            "column": "string",
            "regex_expectation": {
                "regex": "string",
            },
            "description": "string",
            "set_expectation": {
                "values": ["string"],
            },
            "sql_assertion": {
                "sql_statement": "string",
            },
            "statistic_range_expectation": {
                "statistic": "string",
                "max_value": "string",
                "min_value": "string",
                "strict_max_enabled": False,
                "strict_min_enabled": False,
            },
            "table_condition_expectation": {
                "sql_expression": "string",
            },
            "threshold": 0,
            "uniqueness_expectation": {},
        }],
        "sampling_percent": 0,
    },
    # Free-form description and display name for the scan.
    description="string",
    display_name="string",
    # Non-authoritative labels; see 'effective_labels' for all labels on the resource.
    labels={
        "string": "string",
    },
    project="string")
// Example: a Datascan resource with every supported input property set to a
// placeholder value ("string", 0, false). Replace placeholders with real values.
const datascanResource = new gcp.dataplex.Datascan("datascanResource", {
    // The data source for the DataScan (a Dataplex entity or a resource path).
    data: {
        entity: "string",
        resource: "string",
    },
    // DataScan identifier: lowercase letters, numbers and hyphens only;
    // must start with a letter and end with a letter or number.
    dataScanId: "string",
    // Execution settings: trigger is either on-demand or a cron schedule.
    executionSpec: {
        trigger: {
            onDemand: {},
            schedule: {
                cron: "string",
            },
        },
        field: "string",
    },
    // The location where the data scan should reside.
    location: "string",
    // DataProfileScan related settings.
    dataProfileSpec: {
        excludeFields: {
            fieldNames: ["string"],
        },
        includeFields: {
            fieldNames: ["string"],
        },
        postScanActions: {
            bigqueryExport: {
                resultsTable: "string",
            },
        },
        rowFilter: "string",
        samplingPercent: 0,
    },
    // DataQualityScan related settings; each rule carries exactly one expectation.
    dataQualitySpec: {
        postScanActions: {
            bigqueryExport: {
                resultsTable: "string",
            },
        },
        rowFilter: "string",
        rules: [{
            dimension: "string",
            rangeExpectation: {
                maxValue: "string",
                minValue: "string",
                strictMaxEnabled: false,
                strictMinEnabled: false,
            },
            rowConditionExpectation: {
                sqlExpression: "string",
            },
            ignoreNull: false,
            name: "string",
            nonNullExpectation: {},
            column: "string",
            regexExpectation: {
                regex: "string",
            },
            description: "string",
            setExpectation: {
                values: ["string"],
            },
            sqlAssertion: {
                sqlStatement: "string",
            },
            statisticRangeExpectation: {
                statistic: "string",
                maxValue: "string",
                minValue: "string",
                strictMaxEnabled: false,
                strictMinEnabled: false,
            },
            tableConditionExpectation: {
                sqlExpression: "string",
            },
            threshold: 0,
            uniquenessExpectation: {},
        }],
        samplingPercent: 0,
    },
    // Free-form description and display name for the scan.
    description: "string",
    displayName: "string",
    // Non-authoritative labels; see 'effectiveLabels' for all labels on the resource.
    labels: {
        string: "string",
    },
    project: "string",
});
# Example: a Datascan resource with every supported input property set to a
# placeholder value (string, 0, false). Replace placeholders with real values.
type: gcp:dataplex:Datascan
properties:
    # The data source for the DataScan (a Dataplex entity or a resource path).
    data:
        entity: string
        resource: string
    # DataProfileScan related settings.
    dataProfileSpec:
        excludeFields:
            fieldNames:
                - string
        includeFields:
            fieldNames:
                - string
        postScanActions:
            bigqueryExport:
                resultsTable: string
        rowFilter: string
        samplingPercent: 0
    # DataQualityScan related settings; each rule carries exactly one expectation.
    dataQualitySpec:
        postScanActions:
            bigqueryExport:
                resultsTable: string
        rowFilter: string
        rules:
            - column: string
              description: string
              dimension: string
              ignoreNull: false
              name: string
              nonNullExpectation: {}
              rangeExpectation:
                maxValue: string
                minValue: string
                strictMaxEnabled: false
                strictMinEnabled: false
              regexExpectation:
                regex: string
              rowConditionExpectation:
                sqlExpression: string
              setExpectation:
                values:
                    - string
              sqlAssertion:
                sqlStatement: string
              statisticRangeExpectation:
                maxValue: string
                minValue: string
                statistic: string
                strictMaxEnabled: false
                strictMinEnabled: false
              tableConditionExpectation:
                sqlExpression: string
              threshold: 0
              uniquenessExpectation: {}
        samplingPercent: 0
    # DataScan identifier: lowercase letters, numbers and hyphens only;
    # must start with a letter and end with a letter or number.
    dataScanId: string
    description: string
    displayName: string
    # Execution settings: trigger is either on-demand or a cron schedule.
    executionSpec:
        field: string
        trigger:
            onDemand: {}
            schedule:
                cron: string
    # Non-authoritative labels; see effectiveLabels for all labels on the resource.
    labels:
        string: string
    location: string
    project: string
Datascan Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Datascan resource accepts the following input properties:
- Data
DatascanData 
- The data source for DataScan. Structure is documented below.
- DataScanId string
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- ExecutionSpec DatascanExecutionSpec
- DataScan execution settings. Structure is documented below.
- Location string
- The location where the data scan should reside.
- DataProfileSpec DatascanDataProfileSpec
- DataProfileScan related setting.
- DataQualitySpec DatascanDataQualitySpec
- DataQualityScan related setting.
- Description string
- Description of the scan.
- DisplayName string
- User friendly display name.
- Labels Dictionary<string, string>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Project string
- Data
DatascanData Args 
- The data source for DataScan. Structure is documented below.
- DataScanId string
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- ExecutionSpec DatascanExecutionSpecArgs
- DataScan execution settings. Structure is documented below.
- Location string
- The location where the data scan should reside.
- DataProfileSpec DatascanDataProfileSpecArgs
- DataProfileScan related setting.
- DataQualitySpec DatascanDataQualitySpecArgs
- DataQualityScan related setting.
- Description string
- Description of the scan.
- DisplayName string
- User friendly display name.
- Labels map[string]string
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Project string
- data
DatascanData 
- The data source for DataScan. Structure is documented below.
- dataScanId String
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- executionSpec DatascanExecutionSpec
- DataScan execution settings. Structure is documented below.
- location String
- The location where the data scan should reside.
- dataProfileSpec DatascanDataProfileSpec
- DataProfileScan related setting.
- dataQualitySpec DatascanDataQualitySpec
- DataQualityScan related setting.
- description String
- Description of the scan.
- displayName String
- User friendly display name.
- labels Map<String,String>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- project String
- data
DatascanData 
- The data source for DataScan. Structure is documented below.
- dataScanId string
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- executionSpec DatascanExecutionSpec
- DataScan execution settings. Structure is documented below.
- location string
- The location where the data scan should reside.
- dataProfileSpec DatascanDataProfileSpec
- DataProfileScan related setting.
- dataQualitySpec DatascanDataQualitySpec
- DataQualityScan related setting.
- description string
- Description of the scan.
- displayName string
- User friendly display name.
- labels {[key: string]: string}
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- project string
- data
DatascanData Args 
- The data source for DataScan. Structure is documented below.
- data_scan_id str
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- execution_spec DatascanExecutionSpecArgs
- DataScan execution settings. Structure is documented below.
- location str
- The location where the data scan should reside.
- data_profile_spec DatascanDataProfileSpecArgs
- DataProfileScan related setting.
- data_quality_spec DatascanDataQualitySpecArgs
- DataQualityScan related setting.
- description str
- Description of the scan.
- display_name str
- User friendly display name.
- labels Mapping[str, str]
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- project str
- data Property Map
- The data source for DataScan. Structure is documented below.
- dataScanId String
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- executionSpec Property Map
- DataScan execution settings. Structure is documented below.
- location String
- The location where the data scan should reside.
- dataProfileSpec Property Map
- DataProfileScan related setting.
- dataQualitySpec Property Map
- DataQualityScan related setting.
- description String
- Description of the scan.
- displayName String
- User friendly display name.
- labels Map<String>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- project String
Outputs
All input properties are implicitly available as output properties. Additionally, the Datascan resource produces the following output properties:
- CreateTime string
- The time when the scan was created.
- EffectiveLabels Dictionary<string, string>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- ExecutionStatuses List<DatascanExecutionStatus>
- Status of the data scan execution. Structure is documented below.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- State string
- Current state of the DataScan.
- Type string
- The type of DataScan.
- Uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- UpdateTime string
- The time when the scan was last updated.
- CreateTime string
- The time when the scan was created.
- EffectiveLabels map[string]string
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- ExecutionStatuses []DatascanExecutionStatus
- Status of the data scan execution. Structure is documented below.
- Id string
- The provider-assigned unique ID for this managed resource.
- Name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- State string
- Current state of the DataScan.
- Type string
- The type of DataScan.
- Uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- UpdateTime string
- The time when the scan was last updated.
- createTime String
- The time when the scan was created.
- effectiveLabels Map<String,String>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- executionStatuses List<DatascanExecutionStatus>
- Status of the data scan execution. Structure is documented below.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- pulumiLabels Map<String,String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state String
- Current state of the DataScan.
- type String
- The type of DataScan.
- uid String
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- updateTime String
- The time when the scan was last updated.
- createTime string
- The time when the scan was created.
- effectiveLabels {[key: string]: string}
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- executionStatuses DatascanExecutionStatus[]
- Status of the data scan execution. Structure is documented below.
- id string
- The provider-assigned unique ID for this managed resource.
- name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- pulumiLabels {[key: string]: string}
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state string
- Current state of the DataScan.
- type string
- The type of DataScan.
- uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- updateTime string
- The time when the scan was last updated.
- create_time str
- The time when the scan was created.
- effective_labels Mapping[str, str]
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- execution_statuses Sequence[DatascanExecutionStatus]
- Status of the data scan execution. Structure is documented below.
- id str
- The provider-assigned unique ID for this managed resource.
- name str
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- pulumi_labels Mapping[str, str]
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state str
- Current state of the DataScan.
- type str
- The type of DataScan.
- uid str
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- update_time str
- The time when the scan was last updated.
- createTime String
- The time when the scan was created.
- effectiveLabels Map<String>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- executionStatuses List<Property Map>
- Status of the data scan execution. Structure is documented below.
- id String
- The provider-assigned unique ID for this managed resource.
- name String
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- pulumiLabels Map<String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state String
- Current state of the DataScan.
- type String
- The type of DataScan.
- uid String
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- updateTime String
- The time when the scan was last updated.
Look up Existing Datascan Resource
Get an existing Datascan resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: DatascanState, opts?: CustomResourceOptions): Datascan
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        create_time: Optional[str] = None,
        data: Optional[DatascanDataArgs] = None,
        data_profile_spec: Optional[DatascanDataProfileSpecArgs] = None,
        data_quality_spec: Optional[DatascanDataQualitySpecArgs] = None,
        data_scan_id: Optional[str] = None,
        description: Optional[str] = None,
        display_name: Optional[str] = None,
        effective_labels: Optional[Mapping[str, str]] = None,
        execution_spec: Optional[DatascanExecutionSpecArgs] = None,
        execution_statuses: Optional[Sequence[DatascanExecutionStatusArgs]] = None,
        labels: Optional[Mapping[str, str]] = None,
        location: Optional[str] = None,
        name: Optional[str] = None,
        project: Optional[str] = None,
        pulumi_labels: Optional[Mapping[str, str]] = None,
        state: Optional[str] = None,
        type: Optional[str] = None,
        uid: Optional[str] = None,
        update_time: Optional[str] = None) -> Datascan
func GetDatascan(ctx *Context, name string, id IDInput, state *DatascanState, opts ...ResourceOption) (*Datascan, error)
public static Datascan Get(string name, Input<string> id, DatascanState? state, CustomResourceOptions? opts = null)
public static Datascan get(String name, Output<String> id, DatascanState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:dataplex:Datascan
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- CreateTime string
- The time when the scan was created.
- Data
DatascanData 
- The data source for DataScan. Structure is documented below.
- DataProfileSpec DatascanDataProfileSpec
- DataProfileScan related setting.
- DataQualitySpec DatascanDataQualitySpec
- DataQualityScan related setting.
- DataScanId string
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- Description string
- Description of the scan.
- DisplayName string
- User friendly display name.
- EffectiveLabels Dictionary<string, string>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- ExecutionSpec DatascanExecutionSpec
- DataScan execution settings. Structure is documented below.
- ExecutionStatuses List<DatascanExecutionStatus>
- Status of the data scan execution. Structure is documented below.
- Labels Dictionary<string, string>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Location string
- The location where the data scan should reside.
- Name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- Project string
- PulumiLabels Dictionary<string, string>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- State string
- Current state of the DataScan.
- Type string
- The type of DataScan.
- Uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- UpdateTime string
- The time when the scan was last updated.
- CreateTime string
- The time when the scan was created.
- Data
DatascanData Args 
- The data source for DataScan. Structure is documented below.
- DataProfileSpec DatascanDataProfileSpecArgs
- DataProfileScan related setting.
- DataQualitySpec DatascanDataQualitySpecArgs
- DataQualityScan related setting.
- DataScanId string
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- Description string
- Description of the scan.
- DisplayName string
- User friendly display name.
- EffectiveLabels map[string]string
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- ExecutionSpec DatascanExecutionSpecArgs
- DataScan execution settings. Structure is documented below.
- ExecutionStatuses []DatascanExecutionStatusArgs
- Status of the data scan execution. Structure is documented below.
- Labels map[string]string
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- Location string
- The location where the data scan should reside.
- Name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- Project string
- PulumiLabels map[string]string
- The combination of labels configured directly on the resource and default labels configured on the provider.
- State string
- Current state of the DataScan.
- Type string
- The type of DataScan.
- Uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- UpdateTime string
- The time when the scan was last updated.
- createTime String
- The time when the scan was created.
- data
DatascanData 
- The data source for DataScan. Structure is documented below.
- dataProfileSpec DatascanDataProfileSpec
- DataProfileScan related setting.
- dataQualitySpec DatascanDataQualitySpec
- DataQualityScan related setting.
- dataScanId String
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- description String
- Description of the scan.
- displayName String
- User friendly display name.
- effectiveLabels Map<String,String>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- executionSpec DatascanExecutionSpec
- DataScan execution settings. Structure is documented below.
- executionStatuses List<DatascanExecutionStatus>
- Status of the data scan execution. Structure is documented below.
- labels Map<String,String>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- location String
- The location where the data scan should reside.
- name String
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- project String
- pulumiLabels Map<String,String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state String
- Current state of the DataScan.
- type String
- The type of DataScan.
- uid String
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- updateTime String
- The time when the scan was last updated.
- createTime string
- The time when the scan was created.
- data
DatascanData 
- The data source for DataScan. Structure is documented below.
- dataProfileSpec DatascanDataProfileSpec
- DataProfileScan related setting.
- dataQualitySpec DatascanDataQualitySpec
- DataQualityScan related setting.
- dataScanId string
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- description string
- Description of the scan.
- displayName string
- User friendly display name.
- effectiveLabels {[key: string]: string}
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- executionSpec DatascanExecutionSpec 
- DataScan execution settings. Structure is documented below.
- executionStatuses DatascanExecutionStatus[] 
- Status of the data scan execution. Structure is documented below.
- labels {[key: string]: string}
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- location string
- The location where the data scan should reside.
- name string
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- project string
- pulumiLabels {[key: string]: string}
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state string
- Current state of the DataScan.
- type string
- The type of DataScan.
- uid string
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- updateTime string
- The time when the scan was last updated.
- create_time str
- The time when the scan was created.
- data DatascanDataArgs 
- The data source for DataScan. Structure is documented below.
- data_profile_spec DatascanDataProfileSpecArgs 
- DataProfileScan related setting.
- data_quality_spec DatascanDataQualitySpecArgs 
- DataQualityScan related setting.
- data_scan_id str 
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- description str
- Description of the scan.
- display_name str
- User friendly display name.
- effective_labels Mapping[str, str]
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- execution_spec DatascanExecutionSpecArgs 
- DataScan execution settings. Structure is documented below.
- execution_statuses Sequence[DatascanExecutionStatusArgs] 
- Status of the data scan execution. Structure is documented below.
- labels Mapping[str, str]
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- location str
- The location where the data scan should reside.
- name str
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- project str
- pulumi_labels Mapping[str, str]
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state str
- Current state of the DataScan.
- type str
- The type of DataScan.
- uid str
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- update_time str
- The time when the scan was last updated.
- createTime String
- The time when the scan was created.
- data Property Map
- The data source for DataScan. Structure is documented below.
- dataProfileSpec Property Map
- DataProfileScan related setting.
- dataQualitySpec Property Map
- DataQualityScan related setting.
- dataScanId String 
- DataScan identifier. Must contain only lowercase letters, numbers and hyphens. Must start with a letter. Must end with a number or a letter.
- description String
- Description of the scan.
- displayName String
- User friendly display name.
- effectiveLabels Map<String>
- All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.
- executionSpec Property Map
- DataScan execution settings. Structure is documented below.
- executionStatuses List<Property Map>
- Status of the data scan execution. Structure is documented below.
- labels Map<String>
- User-defined labels for the scan. A list of key->value pairs. Note: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource.
- location String
- The location where the data scan should reside.
- name String
- The relative resource name of the scan, of the form: projects/{project}/locations/{locationId}/dataScans/{datascan_id}, where project refers to a project_id or project_number and locationId refers to a GCP region.
- project String
- pulumiLabels Map<String>
- The combination of labels configured directly on the resource and default labels configured on the provider.
- state String
- Current state of the DataScan.
- type String
- The type of DataScan.
- uid String
- System generated globally unique ID for the scan. This ID will be different if the scan is deleted and re-created with the same name.
- updateTime String
- The time when the scan was last updated.
Supporting Types
DatascanData, DatascanDataArgs    
- Entity string
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- Resource string
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
- Entity string
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- Resource string
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
- entity String
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- resource String
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
- entity string
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- resource string
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
- entity str
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- resource str
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
- entity String
- The Dataplex entity that represents the data source(e.g. BigQuery table) for Datascan.
- resource String
- The service-qualified full resource name of the cloud resource for a DataScan job to scan against. The field could be: (Cloud Storage bucket for DataDiscoveryScan)BigQuery table of type "TABLE" for DataProfileScan/DataQualityScan.
DatascanDataProfileSpec, DatascanDataProfileSpecArgs        
- ExcludeFields DatascanDataProfileSpecExcludeFields 
- The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.
- IncludeFields DatascanDataProfileSpecIncludeFields 
- The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.
- PostScanActions DatascanDataProfileSpecPostScanActions 
- Actions to take upon job completion. Structure is documented below.
- RowFilter string
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- SamplingPercent double
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
- ExcludeFields DatascanDataProfileSpecExcludeFields 
- The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.
- IncludeFields DatascanDataProfileSpecIncludeFields 
- The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.
- PostScanActions DatascanDataProfileSpecPostScanActions 
- Actions to take upon job completion. Structure is documented below.
- RowFilter string
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- SamplingPercent float64
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
- excludeFields DatascanDataProfileSpecExcludeFields 
- The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.
- includeFields DatascanDataProfileSpecIncludeFields 
- The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.
- postScanActions DatascanDataProfileSpecPostScanActions 
- Actions to take upon job completion. Structure is documented below.
- rowFilter String
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- samplingPercent Double
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
- excludeFields DatascanDataProfileSpecExcludeFields 
- The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.
- includeFields DatascanDataProfileSpecIncludeFields 
- The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.
- postScanActions DatascanDataProfileSpecPostScanActions 
- Actions to take upon job completion. Structure is documented below.
- rowFilter string
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- samplingPercent number
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
- exclude_fields DatascanDataProfileSpecExcludeFields 
- The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.
- include_fields DatascanDataProfileSpecIncludeFields 
- The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.
- post_scan_actions DatascanDataProfileSpecPostScanActions 
- Actions to take upon job completion. Structure is documented below.
- row_filter str
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- sampling_percent float
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
- excludeFields Property Map
- The fields to exclude from data profile.
If specified, the fields will be excluded from data profile, regardless of include_fields value. Structure is documented below.
- includeFields Property Map
- The fields to include in data profile.
If not specified, all fields at the time of profile scan job execution are included, except for ones listed in exclude_fields. Structure is documented below.
- postScanActions Property Map
- Actions to take upon job completion. Structure is documented below.
- rowFilter String
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- samplingPercent Number
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
DatascanDataProfileSpecExcludeFields, DatascanDataProfileSpecExcludeFieldsArgs            
- FieldNames List<string>
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- FieldNames []string
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- fieldNames List<String>
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- fieldNames string[]
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- field_names Sequence[str]
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- fieldNames List<String>
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
DatascanDataProfileSpecIncludeFields, DatascanDataProfileSpecIncludeFieldsArgs            
- FieldNames List<string>
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- FieldNames []string
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- fieldNames List<String>
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- fieldNames string[]
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- field_names Sequence[str]
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
- fieldNames List<String>
- Expected input is a list of fully qualified names of fields as in the schema. Only top-level field names for nested fields are supported. For instance, if 'x' is of nested field type, listing 'x' is supported but 'x.y.z' is not supported. Here 'y' and 'y.z' are nested fields of 'x'.
DatascanDataProfileSpecPostScanActions, DatascanDataProfileSpecPostScanActionsArgs              
- BigqueryExport DatascanDataProfileSpecPostScanActionsBigqueryExport 
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
- BigqueryExport DatascanDataProfileSpecPostScanActionsBigqueryExport 
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigqueryExport DatascanDataProfileSpecPostScanActionsBigqueryExport 
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigqueryExport DatascanDataProfileSpecPostScanActionsBigqueryExport 
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigquery_export DatascanDataProfileSpecPostScanActionsBigqueryExport 
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigqueryExport Property Map
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
DatascanDataProfileSpecPostScanActionsBigqueryExport, DatascanDataProfileSpecPostScanActionsBigqueryExportArgs                  
- ResultsTable string
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- ResultsTable string
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- resultsTable String
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- resultsTable string
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- results_table str
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- resultsTable String
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
DatascanDataQualitySpec, DatascanDataQualitySpecArgs        
- PostScanActions DatascanDataQualitySpecPostScanActions 
- Actions to take upon job completion. Structure is documented below.
- RowFilter string
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- Rules List<DatascanDataQualitySpecRule> 
- The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- SamplingPercent double
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
- PostScanActions DatascanDataQualitySpecPostScanActions 
- Actions to take upon job completion. Structure is documented below.
- RowFilter string
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- Rules []DatascanDataQualitySpecRule 
- The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- SamplingPercent float64
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
- postScanActions DatascanDataQualitySpecPostScanActions 
- Actions to take upon job completion. Structure is documented below.
- rowFilter String
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- rules List<DatascanDataQualitySpecRule> 
- The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- samplingPercent Double
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
- postScanActions DatascanDataQualitySpecPostScanActions 
- Actions to take upon job completion. Structure is documented below.
- rowFilter string
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- rules DatascanDataQualitySpecRule[] 
- The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- samplingPercent number
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
- post_scan_actions DatascanDataQualitySpecPostScanActions 
- Actions to take upon job completion. Structure is documented below.
- row_filter str
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- rules Sequence[DatascanDataQualitySpecRule] 
- The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- sampling_percent float
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
- postScanActions Property Map
- Actions to take upon job completion. Structure is documented below.
- rowFilter String
- A filter applied to all rows in a single DataScan job. The filter needs to be a valid SQL expression for a WHERE clause in BigQuery standard SQL syntax. Example: col1 >= 0 AND col2 < 10
- rules List<Property Map>
- The list of rules to evaluate against a data source. At least one rule is required. Structure is documented below.
- samplingPercent Number
- The percentage of the records to be selected from the dataset for DataScan.
Value can range between 0.0 and 100.0 with up to 3 significant decimal digits.
Sampling is not applied if sampling_percent is not specified, 0 or 100.
DatascanDataQualitySpecPostScanActions, DatascanDataQualitySpecPostScanActionsArgs              
- BigqueryExport DatascanDataQualitySpecPostScanActionsBigqueryExport 
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
- BigqueryExport DatascanDataQualitySpecPostScanActionsBigqueryExport 
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigqueryExport DatascanDataQualitySpecPostScanActionsBigqueryExport 
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigqueryExport DatascanDataQualitySpecPostScanActionsBigqueryExport 
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigquery_export DatascanDataQualitySpecPostScanActionsBigqueryExport 
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
- bigqueryExport Property Map
- If set, results will be exported to the provided BigQuery table. Structure is documented below.
DatascanDataQualitySpecPostScanActionsBigqueryExport, DatascanDataQualitySpecPostScanActionsBigqueryExportArgs                  
- ResultsTable string
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- ResultsTable string
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- resultsTable String
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- resultsTable string
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- results_table str
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
- resultsTable String
- The BigQuery table to export DataProfileScan results to. Format: //bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID/tables/TABLE_ID
DatascanDataQualitySpecRule, DatascanDataQualitySpecRuleArgs          
- Dimension string
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- Column string
- The unnested column which this rule is evaluated against.
- Description string
- Description of the rule. The maximum length is 1,024 characters.
- IgnoreNull bool
- Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- Name string
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- NonNullExpectation DatascanDataQualitySpecRuleNonNullExpectation 
- ColumnMap rule which evaluates whether each column value is null.
- RangeExpectation DatascanDataQualitySpecRuleRangeExpectation 
- ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- RegexExpectation DatascanDataQualitySpecRuleRegexExpectation 
- ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- RowConditionExpectation DatascanDataQualitySpecRuleRowConditionExpectation 
- Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- SetExpectation DatascanDataQualitySpecRuleSetExpectation 
- ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- SqlAssertion DatascanDataQualitySpecRuleSqlAssertion 
- Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- StatisticRangeExpectation DatascanDataQualitySpecRuleStatisticRangeExpectation 
- ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- TableConditionExpectation DatascanDataQualitySpecRuleTableConditionExpectation 
- Table rule which evaluates whether the provided expression is true. Structure is documented below.
- Threshold double
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- UniquenessExpectation DatascanDataQualitySpecRuleUniquenessExpectation 
- Row-level rule which evaluates whether each column value is unique.
- Dimension string
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- Column string
- The unnested column which this rule is evaluated against.
- Description string
- Description of the rule. The maximum length is 1,024 characters.
- IgnoreNull bool
- Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- Name string
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- NonNullExpectation DatascanDataQualitySpecRuleNonNullExpectation 
- ColumnMap rule which evaluates whether each column value is null.
- RangeExpectation DatascanDataQualitySpecRuleRangeExpectation 
- ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- RegexExpectation DatascanDataQualitySpecRuleRegexExpectation 
- ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- RowConditionExpectation DatascanDataQualitySpecRuleRowConditionExpectation 
- Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- SetExpectation DatascanDataQualitySpecRuleSetExpectation 
- ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- SqlAssertion DatascanDataQualitySpecRuleSqlAssertion 
- Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- StatisticRangeExpectation DatascanDataQualitySpecRuleStatisticRangeExpectation 
- ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- TableConditionExpectation DatascanDataQualitySpecRuleTableConditionExpectation 
- Table rule which evaluates whether the provided expression is true. Structure is documented below.
- Threshold float64
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- UniquenessExpectation DatascanDataQualitySpecRuleUniquenessExpectation 
- Row-level rule which evaluates whether each column value is unique.
- dimension String
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- column String
- The unnested column which this rule is evaluated against.
- description String
- Description of the rule. The maximum length is 1,024 characters.
- ignoreNull Boolean
- Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- name String
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- nonNullExpectation DatascanData Quality Spec Rule Non Null Expectation 
- ColumnMap rule which evaluates whether each column value is null.
- rangeExpectation DatascanData Quality Spec Rule Range Expectation 
- ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- regexExpectation DatascanData Quality Spec Rule Regex Expectation 
- ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- rowConditionExpectation DatascanData Quality Spec Rule Row Condition Expectation 
- Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- setExpectation DatascanData Quality Spec Rule Set Expectation 
- ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- sqlAssertion DatascanData Quality Spec Rule Sql Assertion 
- Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- statisticRangeExpectation DatascanData Quality Spec Rule Statistic Range Expectation 
- ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- tableConditionExpectation DatascanData Quality Spec Rule Table Condition Expectation 
- Table rule which evaluates whether the provided expression is true. Structure is documented below.
- threshold Double
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- uniquenessExpectation DatascanData Quality Spec Rule Uniqueness Expectation 
- Row-level rule which evaluates whether each column value is unique.
- dimension string
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- column string
- The unnested column which this rule is evaluated against.
- description string
- Description of the rule. The maximum length is 1,024 characters.
- ignoreNull boolean
- Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- name string
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- nonNullExpectation DatascanData Quality Spec Rule Non Null Expectation 
- ColumnMap rule which evaluates whether each column value is null.
- rangeExpectation DatascanData Quality Spec Rule Range Expectation 
- ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- regexExpectation DatascanData Quality Spec Rule Regex Expectation 
- ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- rowConditionExpectation DatascanData Quality Spec Rule Row Condition Expectation 
- Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- setExpectation DatascanData Quality Spec Rule Set Expectation 
- ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- sqlAssertion DatascanData Quality Spec Rule Sql Assertion 
- Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- statisticRangeExpectation DatascanData Quality Spec Rule Statistic Range Expectation 
- ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- tableConditionExpectation DatascanData Quality Spec Rule Table Condition Expectation 
- Table rule which evaluates whether the provided expression is true. Structure is documented below.
- threshold number
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- uniquenessExpectation DatascanData Quality Spec Rule Uniqueness Expectation 
- Row-level rule which evaluates whether each column value is unique.
- dimension str
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- column str
- The unnested column which this rule is evaluated against.
- description str
- Description of the rule. The maximum length is 1,024 characters.
- ignore_null bool
- Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- name str
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- non_null_expectation DatascanData Quality Spec Rule Non Null Expectation 
- ColumnMap rule which evaluates whether each column value is null.
- range_expectation DatascanData Quality Spec Rule Range Expectation 
- ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- regex_expectation DatascanData Quality Spec Rule Regex Expectation 
- ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- row_condition_expectation DatascanData Quality Spec Rule Row Condition Expectation 
- Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- set_expectation DatascanData Quality Spec Rule Set Expectation 
- ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- sql_assertion DatascanData Quality Spec Rule Sql Assertion 
- Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- statistic_range_expectation DatascanData Quality Spec Rule Statistic Range Expectation 
- ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- table_condition_expectation DatascanData Quality Spec Rule Table Condition Expectation 
- Table rule which evaluates whether the provided expression is true. Structure is documented below.
- threshold float
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- uniqueness_expectation DatascanData Quality Spec Rule Uniqueness Expectation 
- Row-level rule which evaluates whether each column value is unique.
- dimension String
- The dimension a rule belongs to. Results are also aggregated at the dimension level. Supported dimensions are ["COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY"]
- column String
- The unnested column which this rule is evaluated against.
- description String
- Description of the rule. The maximum length is 1,024 characters.
- ignoreNull Boolean
- Rows with null values will automatically fail a rule, unless ignoreNull is true. In that case, such null rows are trivially considered passing. Only applicable to ColumnMap rules.
- name String
- A mutable name for the rule. The name must contain only letters (a-z, A-Z), numbers (0-9), or hyphens (-). The maximum length is 63 characters. Must start with a letter. Must end with a number or a letter.
- nonNullExpectation Property Map
- ColumnMap rule which evaluates whether each column value is null.
- rangeExpectation Property Map
- ColumnMap rule which evaluates whether each column value lies between a specified range. Structure is documented below.
- regexExpectation Property Map
- ColumnMap rule which evaluates whether each column value matches a specified regex. Structure is documented below.
- rowConditionExpectation Property Map
- Table rule which evaluates whether each row passes the specified condition. Structure is documented below.
- setExpectation Property Map
- ColumnMap rule which evaluates whether each column value is contained by a specified set. Structure is documented below.
- sqlAssertion Property Map
- Table rule which evaluates whether any row matches invalid state. Structure is documented below.
- statisticRangeExpectation Property Map
- ColumnAggregate rule which evaluates whether the column aggregate statistic lies between a specified range. Structure is documented below.
- tableConditionExpectation Property Map
- Table rule which evaluates whether the provided expression is true. Structure is documented below.
- threshold Number
- The minimum ratio of passing_rows / total_rows required to pass this rule, with a range of [0.0, 1.0]. 0 indicates default value (i.e. 1.0).
- uniquenessExpectation Property Map
- Row-level rule which evaluates whether each column value is unique.
DatascanDataQualitySpecRuleRangeExpectation, DatascanDataQualitySpecRuleRangeExpectationArgs              
- MaxValue string
- The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- MinValue string
- The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- StrictMaxEnabled bool
- Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- StrictMinEnabled bool
- Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- MaxValue string
- The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- MinValue string
- The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- StrictMaxEnabled bool
- Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- StrictMinEnabled bool
- Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- maxValue String
- The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- minValue String
- The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strictMaxEnabled Boolean
- Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strictMinEnabled Boolean
- Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- maxValue string
- The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- minValue string
- The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strictMaxEnabled boolean
- Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strictMinEnabled boolean
- Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- max_value str
- The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- min_value str
- The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strict_max_enabled bool
- Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strict_min_enabled bool
- Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- maxValue String
- The maximum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- minValue String
- The minimum column value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strictMaxEnabled Boolean
- Whether each value needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strictMinEnabled Boolean
- Whether each value needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
DatascanDataQualitySpecRuleRegexExpectation, DatascanDataQualitySpecRuleRegexExpectationArgs              
- Regex string
- A regular expression the column value is expected to match.
- Regex string
- A regular expression the column value is expected to match.
- regex String
- A regular expression the column value is expected to match.
- regex string
- A regular expression the column value is expected to match.
- regex str
- A regular expression the column value is expected to match.
- regex String
- A regular expression the column value is expected to match.
DatascanDataQualitySpecRuleRowConditionExpectation, DatascanDataQualitySpecRuleRowConditionExpectationArgs                
- SqlExpression string
- The SQL expression.
- SqlExpression string
- The SQL expression.
- sqlExpression String
- The SQL expression.
- sqlExpression string
- The SQL expression.
- sql_expression str
- The SQL expression.
- sqlExpression String
- The SQL expression.
DatascanDataQualitySpecRuleSetExpectation, DatascanDataQualitySpecRuleSetExpectationArgs              
- Values List<string>
- Expected values for the column value.
- Values []string
- Expected values for the column value.
- values List<String>
- Expected values for the column value.
- values string[]
- Expected values for the column value.
- values Sequence[str]
- Expected values for the column value.
- values List<String>
- Expected values for the column value.
DatascanDataQualitySpecRuleSqlAssertion, DatascanDataQualitySpecRuleSqlAssertionArgs              
- SqlStatement string
- The SQL statement.
- SqlStatement string
- The SQL statement.
- sqlStatement String
- The SQL statement.
- sqlStatement string
- The SQL statement.
- sql_statement str
- The SQL statement.
- sqlStatement String
- The SQL statement.
DatascanDataQualitySpecRuleStatisticRangeExpectation, DatascanDataQualitySpecRuleStatisticRangeExpectationArgs                
- Statistic string
- column statistics.
Possible values are: STATISTIC_UNDEFINED,MEAN,MIN,MAX.
- MaxValue string
- The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- MinValue string
- The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- StrictMaxEnabled bool
- Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- StrictMinEnabled bool
- Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- Statistic string
- column statistics.
Possible values are: STATISTIC_UNDEFINED,MEAN,MIN,MAX.
- MaxValue string
- The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- MinValue string
- The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- StrictMaxEnabled bool
- Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- StrictMinEnabled bool
- Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- statistic String
- column statistics.
Possible values are: STATISTIC_UNDEFINED,MEAN,MIN,MAX.
- maxValue String
- The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- minValue String
- The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strictMaxEnabled Boolean
- Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strictMinEnabled Boolean
- Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- statistic string
- column statistics.
Possible values are: STATISTIC_UNDEFINED,MEAN,MIN,MAX.
- maxValue string
- The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- minValue string
- The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strictMaxEnabled boolean
- Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strictMinEnabled boolean
- Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- statistic str
- column statistics.
Possible values are: STATISTIC_UNDEFINED,MEAN,MIN,MAX.
- max_value str
- The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- min_value str
- The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strict_max_enabled bool
- Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strict_min_enabled bool
- Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
- statistic String
- column statistics.
Possible values are: STATISTIC_UNDEFINED,MEAN,MIN,MAX.
- maxValue String
- The maximum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- minValue String
- The minimum column statistic value allowed for a row to pass this validation. At least one of minValue and maxValue need to be provided.
- strictMaxEnabled Boolean
- Whether column statistic needs to be strictly lesser than ('<') the maximum, or if equality is allowed. Only relevant if a maxValue has been defined. Default = false.
- strictMinEnabled Boolean
- Whether column statistic needs to be strictly greater than ('>') the minimum, or if equality is allowed. Only relevant if a minValue has been defined. Default = false.
DatascanDataQualitySpecRuleTableConditionExpectation, DatascanDataQualitySpecRuleTableConditionExpectationArgs                
- SqlExpression string
- The SQL expression.
- SqlExpression string
- The SQL expression.
- sqlExpression String
- The SQL expression.
- sqlExpression string
- The SQL expression.
- sql_expression str
- The SQL expression.
- sqlExpression String
- The SQL expression.
DatascanExecutionSpec, DatascanExecutionSpecArgs      
- Trigger
DatascanExecution Spec Trigger 
- Spec related to how often and when a scan should be triggered. Structure is documented below.
- Field string
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
- Trigger
DatascanExecution Spec Trigger 
- Spec related to how often and when a scan should be triggered. Structure is documented below.
- Field string
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
- trigger
DatascanExecution Spec Trigger 
- Spec related to how often and when a scan should be triggered. Structure is documented below.
- field String
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
- trigger
DatascanExecution Spec Trigger 
- Spec related to how often and when a scan should be triggered. Structure is documented below.
- field string
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
- trigger
DatascanExecution Spec Trigger 
- Spec related to how often and when a scan should be triggered. Structure is documented below.
- field str
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
- trigger Property Map
- Spec related to how often and when a scan should be triggered. Structure is documented below.
- field String
- The unnested field (of type Date or Timestamp) that contains values which monotonically increase over time. If not specified, a data scan will run for all data in the table.
DatascanExecutionSpecTrigger, DatascanExecutionSpecTriggerArgs        
- OnDemand DatascanExecution Spec Trigger On Demand 
- The scan runs once via dataScans.run API.
- Schedule
DatascanExecution Spec Trigger Schedule 
- The scan is scheduled to run periodically. Structure is documented below.
- OnDemand DatascanExecution Spec Trigger On Demand 
- The scan runs once via dataScans.run API.
- Schedule
DatascanExecution Spec Trigger Schedule 
- The scan is scheduled to run periodically. Structure is documented below.
- onDemand DatascanExecution Spec Trigger On Demand 
- The scan runs once via dataScans.run API.
- schedule
DatascanExecution Spec Trigger Schedule 
- The scan is scheduled to run periodically. Structure is documented below.
- onDemand DatascanExecution Spec Trigger On Demand 
- The scan runs once via dataScans.run API.
- schedule
DatascanExecution Spec Trigger Schedule 
- The scan is scheduled to run periodically. Structure is documented below.
- on_demand DatascanExecution Spec Trigger On Demand 
- The scan runs once via dataScans.run API.
- schedule
DatascanExecution Spec Trigger Schedule 
- The scan is scheduled to run periodically. Structure is documented below.
- onDemand Property Map
- The scan runs once via dataScans.run API.
- schedule Property Map
- The scan is scheduled to run periodically. Structure is documented below.
DatascanExecutionSpecTriggerSchedule, DatascanExecutionSpecTriggerScheduleArgs          
- Cron string
- Cron schedule for running scans periodically. This field is required for Schedule scans.
- Cron string
- Cron schedule for running scans periodically. This field is required for Schedule scans.
- cron String
- Cron schedule for running scans periodically. This field is required for Schedule scans.
- cron string
- Cron schedule for running scans periodically. This field is required for Schedule scans.
- cron str
- Cron schedule for running scans periodically. This field is required for Schedule scans.
- cron String
- Cron schedule for running scans periodically. This field is required for Schedule scans.
DatascanExecutionStatus, DatascanExecutionStatusArgs      
- LatestJobEndTime string
- (Output) The time when the latest DataScanJob ended.
- LatestJobStartTime string
- (Output) The time when the latest DataScanJob started.
- LatestJobEndTime string
- (Output) The time when the latest DataScanJob ended.
- LatestJobStartTime string
- (Output) The time when the latest DataScanJob started.
- latestJobEndTime String
- (Output) The time when the latest DataScanJob ended.
- latestJobStartTime String
- (Output) The time when the latest DataScanJob started.
- latestJobEndTime string
- (Output) The time when the latest DataScanJob ended.
- latestJobStartTime string
- (Output) The time when the latest DataScanJob started.
- latest_job_end_time str
- (Output) The time when the latest DataScanJob ended.
- latest_job_start_time str
- (Output) The time when the latest DataScanJob started.
- latestJobEndTime String
- (Output) The time when the latest DataScanJob ended.
- latestJobStartTime String
- (Output) The time when the latest DataScanJob started.
Import
Datascan can be imported using any of these accepted formats:
- projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}
- {{project}}/{{location}}/{{data_scan_id}}
- {{location}}/{{data_scan_id}}
- {{data_scan_id}}
When using the pulumi import command, Datascan can be imported using one of the formats above. For example:
$ pulumi import gcp:dataplex/datascan:Datascan default projects/{{project}}/locations/{{location}}/dataScans/{{data_scan_id}}
$ pulumi import gcp:dataplex/datascan:Datascan default {{project}}/{{location}}/{{data_scan_id}}
$ pulumi import gcp:dataplex/datascan:Datascan default {{location}}/{{data_scan_id}}
$ pulumi import gcp:dataplex/datascan:Datascan default {{data_scan_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.