gcp.bigquery.Routine
A user-defined function or a stored procedure that belongs to a Dataset.
To get more information about Routine, see:
- API documentation
- How-to Guides
Example Usage
Bigquery Routine Basic
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
const sproc = new gcp.bigquery.Routine("sproc", {
    datasetId: test.datasetId,
    routineId: "routine_id",
    routineType: "PROCEDURE",
    language: "SQL",
    definitionBody: "CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);",
});
import pulumi
import pulumi_gcp as gcp
test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
sproc = gcp.bigquery.Routine("sproc",
    dataset_id=test.dataset_id,
    routine_id="routine_id",
    routine_type="PROCEDURE",
    language="SQL",
    definition_body="CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);")
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
			DatasetId: pulumi.String("dataset_id"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewRoutine(ctx, "sproc", &bigquery.RoutineArgs{
			DatasetId:      test.DatasetId,
			RoutineId:      pulumi.String("routine_id"),
			RoutineType:    pulumi.String("PROCEDURE"),
			Language:       pulumi.String("SQL"),
			DefinitionBody: pulumi.String("CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var test = new Gcp.BigQuery.Dataset("test", new()
    {
        DatasetId = "dataset_id",
    });
    var sproc = new Gcp.BigQuery.Routine("sproc", new()
    {
        DatasetId = test.DatasetId,
        RoutineId = "routine_id",
        RoutineType = "PROCEDURE",
        Language = "SQL",
        DefinitionBody = "CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Routine;
import com.pulumi.gcp.bigquery.RoutineArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var test = new Dataset("test", DatasetArgs.builder()
            .datasetId("dataset_id")
            .build());
        var sproc = new Routine("sproc", RoutineArgs.builder()
            .datasetId(test.datasetId())
            .routineId("routine_id")
            .routineType("PROCEDURE")
            .language("SQL")
            .definitionBody("CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);")
            .build());
    }
}
resources:
  test:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: dataset_id
  sproc:
    type: gcp:bigquery:Routine
    properties:
      datasetId: ${test.datasetId}
      routineId: routine_id
      routineType: PROCEDURE
      language: SQL
      definitionBody: CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);
Bigquery Routine Json
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
const sproc = new gcp.bigquery.Routine("sproc", {
    datasetId: test.datasetId,
    routineId: "routine_id",
    routineType: "SCALAR_FUNCTION",
    language: "JAVASCRIPT",
    definitionBody: "CREATE FUNCTION multiplyInputs return x*y;",
    arguments: [
        {
            name: "x",
            dataType: "{\"typeKind\" :  \"FLOAT64\"}",
        },
        {
            name: "y",
            dataType: "{\"typeKind\" :  \"FLOAT64\"}",
        },
    ],
    returnType: "{\"typeKind\" :  \"FLOAT64\"}",
});
import pulumi
import pulumi_gcp as gcp
test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
sproc = gcp.bigquery.Routine("sproc",
    dataset_id=test.dataset_id,
    routine_id="routine_id",
    routine_type="SCALAR_FUNCTION",
    language="JAVASCRIPT",
    definition_body="CREATE FUNCTION multiplyInputs return x*y;",
    arguments=[
        {
            "name": "x",
            "data_type": "{\"typeKind\" :  \"FLOAT64\"}",
        },
        {
            "name": "y",
            "data_type": "{\"typeKind\" :  \"FLOAT64\"}",
        },
    ],
    return_type="{\"typeKind\" :  \"FLOAT64\"}")
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
			DatasetId: pulumi.String("dataset_id"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewRoutine(ctx, "sproc", &bigquery.RoutineArgs{
			DatasetId:      test.DatasetId,
			RoutineId:      pulumi.String("routine_id"),
			RoutineType:    pulumi.String("SCALAR_FUNCTION"),
			Language:       pulumi.String("JAVASCRIPT"),
			DefinitionBody: pulumi.String("CREATE FUNCTION multiplyInputs return x*y;"),
			Arguments: bigquery.RoutineArgumentArray{
				&bigquery.RoutineArgumentArgs{
					Name:     pulumi.String("x"),
					DataType: pulumi.String("{\"typeKind\" :  \"FLOAT64\"}"),
				},
				&bigquery.RoutineArgumentArgs{
					Name:     pulumi.String("y"),
					DataType: pulumi.String("{\"typeKind\" :  \"FLOAT64\"}"),
				},
			},
			ReturnType: pulumi.String("{\"typeKind\" :  \"FLOAT64\"}"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var test = new Gcp.BigQuery.Dataset("test", new()
    {
        DatasetId = "dataset_id",
    });
    var sproc = new Gcp.BigQuery.Routine("sproc", new()
    {
        DatasetId = test.DatasetId,
        RoutineId = "routine_id",
        RoutineType = "SCALAR_FUNCTION",
        Language = "JAVASCRIPT",
        DefinitionBody = "CREATE FUNCTION multiplyInputs return x*y;",
        Arguments = new[]
        {
            new Gcp.BigQuery.Inputs.RoutineArgumentArgs
            {
                Name = "x",
                DataType = "{\"typeKind\" :  \"FLOAT64\"}",
            },
            new Gcp.BigQuery.Inputs.RoutineArgumentArgs
            {
                Name = "y",
                DataType = "{\"typeKind\" :  \"FLOAT64\"}",
            },
        },
        ReturnType = "{\"typeKind\" :  \"FLOAT64\"}",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Routine;
import com.pulumi.gcp.bigquery.RoutineArgs;
import com.pulumi.gcp.bigquery.inputs.RoutineArgumentArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var test = new Dataset("test", DatasetArgs.builder()
            .datasetId("dataset_id")
            .build());
        var sproc = new Routine("sproc", RoutineArgs.builder()
            .datasetId(test.datasetId())
            .routineId("routine_id")
            .routineType("SCALAR_FUNCTION")
            .language("JAVASCRIPT")
            .definitionBody("CREATE FUNCTION multiplyInputs return x*y;")
            .arguments(            
                RoutineArgumentArgs.builder()
                    .name("x")
                    .dataType("{\"typeKind\" :  \"FLOAT64\"}")
                    .build(),
                RoutineArgumentArgs.builder()
                    .name("y")
                    .dataType("{\"typeKind\" :  \"FLOAT64\"}")
                    .build())
            .returnType("{\"typeKind\" :  \"FLOAT64\"}")
            .build());
    }
}
resources:
  test:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: dataset_id
  sproc:
    type: gcp:bigquery:Routine
    properties:
      datasetId: ${test.datasetId}
      routineId: routine_id
      routineType: SCALAR_FUNCTION
      language: JAVASCRIPT
      definitionBody: CREATE FUNCTION multiplyInputs return x*y;
      arguments:
        - name: x
          dataType: '{"typeKind" :  "FLOAT64"}'
        - name: y
          dataType: '{"typeKind" :  "FLOAT64"}'
      returnType: '{"typeKind" :  "FLOAT64"}'
Bigquery Routine Tvf
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
const sproc = new gcp.bigquery.Routine("sproc", {
    datasetId: test.datasetId,
    routineId: "routine_id",
    routineType: "TABLE_VALUED_FUNCTION",
    language: "SQL",
    definitionBody: "SELECT 1 + value AS value\n",
    arguments: [{
        name: "value",
        argumentKind: "FIXED_TYPE",
        dataType: JSON.stringify({
            typeKind: "INT64",
        }),
    }],
    returnTableType: JSON.stringify({
        columns: [{
            name: "value",
            type: {
                typeKind: "INT64",
            },
        }],
    }),
});
import pulumi
import json
import pulumi_gcp as gcp
test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
sproc = gcp.bigquery.Routine("sproc",
    dataset_id=test.dataset_id,
    routine_id="routine_id",
    routine_type="TABLE_VALUED_FUNCTION",
    language="SQL",
    definition_body="SELECT 1 + value AS value\n",
    arguments=[{
        "name": "value",
        "argument_kind": "FIXED_TYPE",
        "data_type": json.dumps({
            "typeKind": "INT64",
        }),
    }],
    return_table_type=json.dumps({
        "columns": [{
            "name": "value",
            "type": {
                "typeKind": "INT64",
            },
        }],
    }))
package main
import (
	"encoding/json"
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
			DatasetId: pulumi.String("dataset_id"),
		})
		if err != nil {
			return err
		}
		tmpJSON0, err := json.Marshal(map[string]interface{}{
			"typeKind": "INT64",
		})
		if err != nil {
			return err
		}
		json0 := string(tmpJSON0)
		tmpJSON1, err := json.Marshal(map[string]interface{}{
			"columns": []map[string]interface{}{
				map[string]interface{}{
					"name": "value",
					"type": map[string]interface{}{
						"typeKind": "INT64",
					},
				},
			},
		})
		if err != nil {
			return err
		}
		json1 := string(tmpJSON1)
		_, err = bigquery.NewRoutine(ctx, "sproc", &bigquery.RoutineArgs{
			DatasetId:      test.DatasetId,
			RoutineId:      pulumi.String("routine_id"),
			RoutineType:    pulumi.String("TABLE_VALUED_FUNCTION"),
			Language:       pulumi.String("SQL"),
			DefinitionBody: pulumi.String("SELECT 1 + value AS value\n"),
			Arguments: bigquery.RoutineArgumentArray{
				&bigquery.RoutineArgumentArgs{
					Name:         pulumi.String("value"),
					ArgumentKind: pulumi.String("FIXED_TYPE"),
					DataType:     pulumi.String(json0),
				},
			},
			ReturnTableType: pulumi.String(json1),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var test = new Gcp.BigQuery.Dataset("test", new()
    {
        DatasetId = "dataset_id",
    });
    var sproc = new Gcp.BigQuery.Routine("sproc", new()
    {
        DatasetId = test.DatasetId,
        RoutineId = "routine_id",
        RoutineType = "TABLE_VALUED_FUNCTION",
        Language = "SQL",
        DefinitionBody = @"SELECT 1 + value AS value
",
        Arguments = new[]
        {
            new Gcp.BigQuery.Inputs.RoutineArgumentArgs
            {
                Name = "value",
                ArgumentKind = "FIXED_TYPE",
                DataType = JsonSerializer.Serialize(new Dictionary<string, object?>
                {
                    ["typeKind"] = "INT64",
                }),
            },
        },
        ReturnTableType = JsonSerializer.Serialize(new Dictionary<string, object?>
        {
            ["columns"] = new[]
            {
                new Dictionary<string, object?>
                {
                    ["name"] = "value",
                    ["type"] = new Dictionary<string, object?>
                    {
                        ["typeKind"] = "INT64",
                    },
                },
            },
        }),
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Routine;
import com.pulumi.gcp.bigquery.RoutineArgs;
import com.pulumi.gcp.bigquery.inputs.RoutineArgumentArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var test = new Dataset("test", DatasetArgs.builder()
            .datasetId("dataset_id")
            .build());
        var sproc = new Routine("sproc", RoutineArgs.builder()
            .datasetId(test.datasetId())
            .routineId("routine_id")
            .routineType("TABLE_VALUED_FUNCTION")
            .language("SQL")
            .definitionBody("""
SELECT 1 + value AS value
            """)
            .arguments(RoutineArgumentArgs.builder()
                .name("value")
                .argumentKind("FIXED_TYPE")
                .dataType(serializeJson(
                    jsonObject(
                        jsonProperty("typeKind", "INT64")
                    )))
                .build())
            .returnTableType(serializeJson(
                jsonObject(
                    jsonProperty("columns", jsonArray(jsonObject(
                        jsonProperty("name", "value"),
                        jsonProperty("type", jsonObject(
                            jsonProperty("typeKind", "INT64")
                        ))
                    )))
                )))
            .build());
    }
}
resources:
  test:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: dataset_id
  sproc:
    type: gcp:bigquery:Routine
    properties:
      datasetId: ${test.datasetId}
      routineId: routine_id
      routineType: TABLE_VALUED_FUNCTION
      language: SQL
      definitionBody: |
        SELECT 1 + value AS value        
      arguments:
        - name: value
          argumentKind: FIXED_TYPE
          dataType:
            fn::toJSON:
              typeKind: INT64
      returnTableType:
        fn::toJSON:
          columns:
            - name: value
              type:
                typeKind: INT64
Bigquery Routine Pyspark
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
const testConnection = new gcp.bigquery.Connection("test", {
    connectionId: "connection_id",
    location: "US",
    spark: {},
});
const pyspark = new gcp.bigquery.Routine("pyspark", {
    datasetId: test.datasetId,
    routineId: "routine_id",
    routineType: "PROCEDURE",
    language: "PYTHON",
    definitionBody: `from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
    
# Load data from BigQuery.
words = spark.read.format("bigquery") \\
  .option("table", "bigquery-public-data:samples.shakespeare") \\
  .load()
words.createOrReplaceTempView("words")
    
# Perform word count.
word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
word_count.show()
word_count.printSchema()
    
# Saving the data to BigQuery
word_count.write.format("bigquery") \\
  .option("writeMethod", "direct") \\
  .save("wordcount_dataset.wordcount_output")
`,
    sparkOptions: {
        connection: testConnection.name,
        runtimeVersion: "2.1",
    },
});
import pulumi
import pulumi_gcp as gcp
test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
test_connection = gcp.bigquery.Connection("test",
    connection_id="connection_id",
    location="US",
    spark={})
pyspark = gcp.bigquery.Routine("pyspark",
    dataset_id=test.dataset_id,
    routine_id="routine_id",
    routine_type="PROCEDURE",
    language="PYTHON",
    definition_body="""from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
    
# Load data from BigQuery.
words = spark.read.format("bigquery") \
  .option("table", "bigquery-public-data:samples.shakespeare") \
  .load()
words.createOrReplaceTempView("words")
    
# Perform word count.
word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
word_count.show()
word_count.printSchema()
    
# Saving the data to BigQuery
word_count.write.format("bigquery") \
  .option("writeMethod", "direct") \
  .save("wordcount_dataset.wordcount_output")
""",
    spark_options={
        "connection": test_connection.name,
        "runtime_version": "2.1",
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
			DatasetId: pulumi.String("dataset_id"),
		})
		if err != nil {
			return err
		}
		testConnection, err := bigquery.NewConnection(ctx, "test", &bigquery.ConnectionArgs{
			ConnectionId: pulumi.String("connection_id"),
			Location:     pulumi.String("US"),
			Spark:        &bigquery.ConnectionSparkArgs{},
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewRoutine(ctx, "pyspark", &bigquery.RoutineArgs{
			DatasetId:   test.DatasetId,
			RoutineId:   pulumi.String("routine_id"),
			RoutineType: pulumi.String("PROCEDURE"),
			Language:    pulumi.String("PYTHON"),
			DefinitionBody: pulumi.String(`from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
    
# Load data from BigQuery.
words = spark.read.format("bigquery") \
  .option("table", "bigquery-public-data:samples.shakespeare") \
  .load()
words.createOrReplaceTempView("words")
    
# Perform word count.
word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
word_count.show()
word_count.printSchema()
    
# Saving the data to BigQuery
word_count.write.format("bigquery") \
  .option("writeMethod", "direct") \
  .save("wordcount_dataset.wordcount_output")
`),
			SparkOptions: &bigquery.RoutineSparkOptionsArgs{
				Connection:     testConnection.Name,
				RuntimeVersion: pulumi.String("2.1"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var test = new Gcp.BigQuery.Dataset("test", new()
    {
        DatasetId = "dataset_id",
    });
    var testConnection = new Gcp.BigQuery.Connection("test", new()
    {
        ConnectionId = "connection_id",
        Location = "US",
        Spark = null,
    });
    var pyspark = new Gcp.BigQuery.Routine("pyspark", new()
    {
        DatasetId = test.DatasetId,
        RoutineId = "routine_id",
        RoutineType = "PROCEDURE",
        Language = "PYTHON",
        DefinitionBody = @"from pyspark.sql import SparkSession
spark = SparkSession.builder.appName(""spark-bigquery-demo"").getOrCreate()
    
# Load data from BigQuery.
words = spark.read.format(""bigquery"") \
  .option(""table"", ""bigquery-public-data:samples.shakespeare"") \
  .load()
words.createOrReplaceTempView(""words"")
    
# Perform word count.
word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed(""sum(word_count)"", ""sum_word_count"")
word_count.show()
word_count.printSchema()
    
# Saving the data to BigQuery
word_count.write.format(""bigquery"") \
  .option(""writeMethod"", ""direct"") \
  .save(""wordcount_dataset.wordcount_output"")
",
        SparkOptions = new Gcp.BigQuery.Inputs.RoutineSparkOptionsArgs
        {
            Connection = testConnection.Name,
            RuntimeVersion = "2.1",
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Connection;
import com.pulumi.gcp.bigquery.ConnectionArgs;
import com.pulumi.gcp.bigquery.inputs.ConnectionSparkArgs;
import com.pulumi.gcp.bigquery.Routine;
import com.pulumi.gcp.bigquery.RoutineArgs;
import com.pulumi.gcp.bigquery.inputs.RoutineSparkOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var test = new Dataset("test", DatasetArgs.builder()
            .datasetId("dataset_id")
            .build());
        var testConnection = new Connection("testConnection", ConnectionArgs.builder()
            .connectionId("connection_id")
            .location("US")
            .spark(ConnectionSparkArgs.builder().build())
            .build());
        var pyspark = new Routine("pyspark", RoutineArgs.builder()
            .datasetId(test.datasetId())
            .routineId("routine_id")
            .routineType("PROCEDURE")
            .language("PYTHON")
            .definitionBody("""
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
    
# Load data from BigQuery.
words = spark.read.format("bigquery") \
  .option("table", "bigquery-public-data:samples.shakespeare") \
  .load()
words.createOrReplaceTempView("words")
    
# Perform word count.
word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
word_count.show()
word_count.printSchema()
    
# Saving the data to BigQuery
word_count.write.format("bigquery") \
  .option("writeMethod", "direct") \
  .save("wordcount_dataset.wordcount_output")
            """)
            .sparkOptions(RoutineSparkOptionsArgs.builder()
                .connection(testConnection.name())
                .runtimeVersion("2.1")
                .build())
            .build());
    }
}
resources:
  test:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: dataset_id
  testConnection:
    type: gcp:bigquery:Connection
    name: test
    properties:
      connectionId: connection_id
      location: US
      spark: {}
  pyspark:
    type: gcp:bigquery:Routine
    properties:
      datasetId: ${test.datasetId}
      routineId: routine_id
      routineType: PROCEDURE
      language: PYTHON
      definitionBody: "from pyspark.sql import SparkSession\n\nspark = SparkSession.builder.appName(\"spark-bigquery-demo\").getOrCreate()\n    \n# Load data from BigQuery.\nwords = spark.read.format(\"bigquery\") \\\n  .option(\"table\", \"bigquery-public-data:samples.shakespeare\") \\\n  .load()\nwords.createOrReplaceTempView(\"words\")\n    \n# Perform word count.\nword_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed(\"sum(word_count)\", \"sum_word_count\")\nword_count.show()\nword_count.printSchema()\n    \n# Saving the data to BigQuery\nword_count.write.format(\"bigquery\") \\\n  .option(\"writeMethod\", \"direct\") \\\n  .save(\"wordcount_dataset.wordcount_output\")\n"
      sparkOptions:
        connection: ${testConnection.name}
        runtimeVersion: '2.1'
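The PySpark source does not have to be inlined as a template literal; it can also live in a file next to the Pulumi program and be read at deployment time. A minimal TypeScript sketch, assuming a hypothetical local file spark_job.py containing the same script as above:
import * as fs from "fs";
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
const testConnection = new gcp.bigquery.Connection("test", {
    connectionId: "connection_id",
    location: "US",
    spark: {},
});
const pyspark = new gcp.bigquery.Routine("pyspark", {
    datasetId: test.datasetId,
    routineId: "routine_id",
    routineType: "PROCEDURE",
    language: "PYTHON",
    // Read the PySpark script from disk instead of embedding it in the program.
    definitionBody: fs.readFileSync("spark_job.py", "utf8"),
    sparkOptions: {
        connection: testConnection.name,
        runtimeVersion: "2.1",
    },
});
The next example shows the provider's own alternative for larger jobs: leaving definitionBody empty and pointing sparkOptions.mainFileUri at a script stored in Cloud Storage.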
Bigquery Routine Pyspark Mainfile
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
const testConnection = new gcp.bigquery.Connection("test", {
    connectionId: "connection_id",
    location: "US",
    spark: {},
});
const pysparkMainfile = new gcp.bigquery.Routine("pyspark_mainfile", {
    datasetId: test.datasetId,
    routineId: "routine_id",
    routineType: "PROCEDURE",
    language: "PYTHON",
    definitionBody: "",
    sparkOptions: {
        connection: testConnection.name,
        runtimeVersion: "2.1",
        mainFileUri: "gs://test-bucket/main.py",
        pyFileUris: ["gs://test-bucket/lib.py"],
        fileUris: ["gs://test-bucket/distribute_in_executor.json"],
        archiveUris: ["gs://test-bucket/distribute_in_executor.tar.gz"],
    },
});
import pulumi
import pulumi_gcp as gcp
test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
test_connection = gcp.bigquery.Connection("test",
    connection_id="connection_id",
    location="US",
    spark={})
pyspark_mainfile = gcp.bigquery.Routine("pyspark_mainfile",
    dataset_id=test.dataset_id,
    routine_id="routine_id",
    routine_type="PROCEDURE",
    language="PYTHON",
    definition_body="",
    spark_options={
        "connection": test_connection.name,
        "runtime_version": "2.1",
        "main_file_uri": "gs://test-bucket/main.py",
        "py_file_uris": ["gs://test-bucket/lib.py"],
        "file_uris": ["gs://test-bucket/distribute_in_executor.json"],
        "archive_uris": ["gs://test-bucket/distribute_in_executor.tar.gz"],
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
			DatasetId: pulumi.String("dataset_id"),
		})
		if err != nil {
			return err
		}
		testConnection, err := bigquery.NewConnection(ctx, "test", &bigquery.ConnectionArgs{
			ConnectionId: pulumi.String("connection_id"),
			Location:     pulumi.String("US"),
			Spark:        &bigquery.ConnectionSparkArgs{},
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewRoutine(ctx, "pyspark_mainfile", &bigquery.RoutineArgs{
			DatasetId:      test.DatasetId,
			RoutineId:      pulumi.String("routine_id"),
			RoutineType:    pulumi.String("PROCEDURE"),
			Language:       pulumi.String("PYTHON"),
			DefinitionBody: pulumi.String(""),
			SparkOptions: &bigquery.RoutineSparkOptionsArgs{
				Connection:     testConnection.Name,
				RuntimeVersion: pulumi.String("2.1"),
				MainFileUri:    pulumi.String("gs://test-bucket/main.py"),
				PyFileUris: pulumi.StringArray{
					pulumi.String("gs://test-bucket/lib.py"),
				},
				FileUris: pulumi.StringArray{
					pulumi.String("gs://test-bucket/distribute_in_executor.json"),
				},
				ArchiveUris: pulumi.StringArray{
					pulumi.String("gs://test-bucket/distribute_in_executor.tar.gz"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var test = new Gcp.BigQuery.Dataset("test", new()
    {
        DatasetId = "dataset_id",
    });
    var testConnection = new Gcp.BigQuery.Connection("test", new()
    {
        ConnectionId = "connection_id",
        Location = "US",
        Spark = null,
    });
    var pysparkMainfile = new Gcp.BigQuery.Routine("pyspark_mainfile", new()
    {
        DatasetId = test.DatasetId,
        RoutineId = "routine_id",
        RoutineType = "PROCEDURE",
        Language = "PYTHON",
        DefinitionBody = "",
        SparkOptions = new Gcp.BigQuery.Inputs.RoutineSparkOptionsArgs
        {
            Connection = testConnection.Name,
            RuntimeVersion = "2.1",
            MainFileUri = "gs://test-bucket/main.py",
            PyFileUris = new[]
            {
                "gs://test-bucket/lib.py",
            },
            FileUris = new[]
            {
                "gs://test-bucket/distribute_in_executor.json",
            },
            ArchiveUris = new[]
            {
                "gs://test-bucket/distribute_in_executor.tar.gz",
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Connection;
import com.pulumi.gcp.bigquery.ConnectionArgs;
import com.pulumi.gcp.bigquery.inputs.ConnectionSparkArgs;
import com.pulumi.gcp.bigquery.Routine;
import com.pulumi.gcp.bigquery.RoutineArgs;
import com.pulumi.gcp.bigquery.inputs.RoutineSparkOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var test = new Dataset("test", DatasetArgs.builder()
            .datasetId("dataset_id")
            .build());
        var testConnection = new Connection("testConnection", ConnectionArgs.builder()
            .connectionId("connection_id")
            .location("US")
            .spark(ConnectionSparkArgs.builder().build())
            .build());
        var pysparkMainfile = new Routine("pysparkMainfile", RoutineArgs.builder()
            .datasetId(test.datasetId())
            .routineId("routine_id")
            .routineType("PROCEDURE")
            .language("PYTHON")
            .definitionBody("")
            .sparkOptions(RoutineSparkOptionsArgs.builder()
                .connection(testConnection.name())
                .runtimeVersion("2.1")
                .mainFileUri("gs://test-bucket/main.py")
                .pyFileUris("gs://test-bucket/lib.py")
                .fileUris("gs://test-bucket/distribute_in_executor.json")
                .archiveUris("gs://test-bucket/distribute_in_executor.tar.gz")
                .build())
            .build());
    }
}
resources:
  test:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: dataset_id
  testConnection:
    type: gcp:bigquery:Connection
    name: test
    properties:
      connectionId: connection_id
      location: US
      spark: {}
  pysparkMainfile:
    type: gcp:bigquery:Routine
    name: pyspark_mainfile
    properties:
      datasetId: ${test.datasetId}
      routineId: routine_id
      routineType: PROCEDURE
      language: PYTHON
      definitionBody: ""
      sparkOptions:
        connection: ${testConnection.name}
        runtimeVersion: '2.1'
        mainFileUri: gs://test-bucket/main.py
        pyFileUris:
          - gs://test-bucket/lib.py
        fileUris:
          - gs://test-bucket/distribute_in_executor.json
        archiveUris:
          - gs://test-bucket/distribute_in_executor.tar.gz
Bigquery Routine Spark Jar
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
const testConnection = new gcp.bigquery.Connection("test", {
    connectionId: "connection_id",
    location: "US",
    spark: {},
});
const sparkJar = new gcp.bigquery.Routine("spark_jar", {
    datasetId: test.datasetId,
    routineId: "routine_id",
    routineType: "PROCEDURE",
    language: "SCALA",
    definitionBody: "",
    sparkOptions: {
        connection: testConnection.name,
        runtimeVersion: "2.1",
        containerImage: "gcr.io/my-project-id/my-spark-image:latest",
        mainClass: "com.google.test.jar.MainClass",
        jarUris: ["gs://test-bucket/uberjar_spark_spark3.jar"],
        properties: {
            "spark.dataproc.scaling.version": "2",
            "spark.reducer.fetchMigratedShuffle.enabled": "true",
        },
    },
});
import pulumi
import pulumi_gcp as gcp
test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
test_connection = gcp.bigquery.Connection("test",
    connection_id="connection_id",
    location="US",
    spark={})
spark_jar = gcp.bigquery.Routine("spark_jar",
    dataset_id=test.dataset_id,
    routine_id="routine_id",
    routine_type="PROCEDURE",
    language="SCALA",
    definition_body="",
    spark_options={
        "connection": test_connection.name,
        "runtime_version": "2.1",
        "container_image": "gcr.io/my-project-id/my-spark-image:latest",
        "main_class": "com.google.test.jar.MainClass",
        "jar_uris": ["gs://test-bucket/uberjar_spark_spark3.jar"],
        "properties": {
            "spark.dataproc.scaling.version": "2",
            "spark.reducer.fetchMigratedShuffle.enabled": "true",
        },
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
			DatasetId: pulumi.String("dataset_id"),
		})
		if err != nil {
			return err
		}
		testConnection, err := bigquery.NewConnection(ctx, "test", &bigquery.ConnectionArgs{
			ConnectionId: pulumi.String("connection_id"),
			Location:     pulumi.String("US"),
			Spark:        &bigquery.ConnectionSparkArgs{},
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewRoutine(ctx, "spark_jar", &bigquery.RoutineArgs{
			DatasetId:      test.DatasetId,
			RoutineId:      pulumi.String("routine_id"),
			RoutineType:    pulumi.String("PROCEDURE"),
			Language:       pulumi.String("SCALA"),
			DefinitionBody: pulumi.String(""),
			SparkOptions: &bigquery.RoutineSparkOptionsArgs{
				Connection:     testConnection.Name,
				RuntimeVersion: pulumi.String("2.1"),
				ContainerImage: pulumi.String("gcr.io/my-project-id/my-spark-image:latest"),
				MainClass:      pulumi.String("com.google.test.jar.MainClass"),
				JarUris: pulumi.StringArray{
					pulumi.String("gs://test-bucket/uberjar_spark_spark3.jar"),
				},
				Properties: pulumi.StringMap{
					"spark.dataproc.scaling.version":             pulumi.String("2"),
					"spark.reducer.fetchMigratedShuffle.enabled": pulumi.String("true"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var test = new Gcp.BigQuery.Dataset("test", new()
    {
        DatasetId = "dataset_id",
    });
    var testConnection = new Gcp.BigQuery.Connection("test", new()
    {
        ConnectionId = "connection_id",
        Location = "US",
        Spark = null,
    });
    var sparkJar = new Gcp.BigQuery.Routine("spark_jar", new()
    {
        DatasetId = test.DatasetId,
        RoutineId = "routine_id",
        RoutineType = "PROCEDURE",
        Language = "SCALA",
        DefinitionBody = "",
        SparkOptions = new Gcp.BigQuery.Inputs.RoutineSparkOptionsArgs
        {
            Connection = testConnection.Name,
            RuntimeVersion = "2.1",
            ContainerImage = "gcr.io/my-project-id/my-spark-image:latest",
            MainClass = "com.google.test.jar.MainClass",
            JarUris = new[]
            {
                "gs://test-bucket/uberjar_spark_spark3.jar",
            },
            Properties = 
            {
                { "spark.dataproc.scaling.version", "2" },
                { "spark.reducer.fetchMigratedShuffle.enabled", "true" },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Connection;
import com.pulumi.gcp.bigquery.ConnectionArgs;
import com.pulumi.gcp.bigquery.inputs.ConnectionSparkArgs;
import com.pulumi.gcp.bigquery.Routine;
import com.pulumi.gcp.bigquery.RoutineArgs;
import com.pulumi.gcp.bigquery.inputs.RoutineSparkOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var test = new Dataset("test", DatasetArgs.builder()
            .datasetId("dataset_id")
            .build());
        var testConnection = new Connection("testConnection", ConnectionArgs.builder()
            .connectionId("connection_id")
            .location("US")
            .spark(ConnectionSparkArgs.builder().build())
            .build());
        var sparkJar = new Routine("sparkJar", RoutineArgs.builder()
            .datasetId(test.datasetId())
            .routineId("routine_id")
            .routineType("PROCEDURE")
            .language("SCALA")
            .definitionBody("")
            .sparkOptions(RoutineSparkOptionsArgs.builder()
                .connection(testConnection.name())
                .runtimeVersion("2.1")
                .containerImage("gcr.io/my-project-id/my-spark-image:latest")
                .mainClass("com.google.test.jar.MainClass")
                .jarUris("gs://test-bucket/uberjar_spark_spark3.jar")
                .properties(Map.ofEntries(
                    Map.entry("spark.dataproc.scaling.version", "2"),
                    Map.entry("spark.reducer.fetchMigratedShuffle.enabled", "true")
                ))
                .build())
            .build());
    }
}
resources:
  test:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: dataset_id
  testConnection:
    type: gcp:bigquery:Connection
    name: test
    properties:
      connectionId: connection_id
      location: US
      spark: {}
  sparkJar:
    type: gcp:bigquery:Routine
    name: spark_jar
    properties:
      datasetId: ${test.datasetId}
      routineId: routine_id
      routineType: PROCEDURE
      language: SCALA
      definitionBody: ""
      sparkOptions:
        connection: ${testConnection.name}
        runtimeVersion: '2.1'
        containerImage: gcr.io/my-project-id/my-spark-image:latest
        mainClass: com.google.test.jar.MainClass
        jarUris:
          - gs://test-bucket/uberjar_spark_spark3.jar
        properties:
          spark.dataproc.scaling.version: '2'
          spark.reducer.fetchMigratedShuffle.enabled: 'true'
Bigquery Routine Data Governance Type
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const test = new gcp.bigquery.Dataset("test", {datasetId: "tf_test_dataset_id_81126"});
const customMaskingRoutine = new gcp.bigquery.Routine("custom_masking_routine", {
    datasetId: test.datasetId,
    routineId: "custom_masking_routine",
    routineType: "SCALAR_FUNCTION",
    language: "SQL",
    dataGovernanceType: "DATA_MASKING",
    definitionBody: "SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')",
    arguments: [{
        name: "ssn",
        dataType: "{\"typeKind\" :  \"STRING\"}",
    }],
    returnType: "{\"typeKind\" :  \"STRING\"}",
});
import pulumi
import pulumi_gcp as gcp
test = gcp.bigquery.Dataset("test", dataset_id="tf_test_dataset_id_81126")
custom_masking_routine = gcp.bigquery.Routine("custom_masking_routine",
    dataset_id=test.dataset_id,
    routine_id="custom_masking_routine",
    routine_type="SCALAR_FUNCTION",
    language="SQL",
    data_governance_type="DATA_MASKING",
    definition_body="SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')",
    arguments=[{
        "name": "ssn",
        "data_type": "{\"typeKind\" :  \"STRING\"}",
    }],
    return_type="{\"typeKind\" :  \"STRING\"}")
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
			DatasetId: pulumi.String("tf_test_dataset_id_81126"),
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewRoutine(ctx, "custom_masking_routine", &bigquery.RoutineArgs{
			DatasetId:          test.DatasetId,
			RoutineId:          pulumi.String("custom_masking_routine"),
			RoutineType:        pulumi.String("SCALAR_FUNCTION"),
			Language:           pulumi.String("SQL"),
			DataGovernanceType: pulumi.String("DATA_MASKING"),
			DefinitionBody:     pulumi.String("SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')"),
			Arguments: bigquery.RoutineArgumentArray{
				&bigquery.RoutineArgumentArgs{
					Name:     pulumi.String("ssn"),
					DataType: pulumi.String("{\"typeKind\" :  \"STRING\"}"),
				},
			},
			ReturnType: pulumi.String("{\"typeKind\" :  \"STRING\"}"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var test = new Gcp.BigQuery.Dataset("test", new()
    {
        DatasetId = "tf_test_dataset_id_81126",
    });
    var customMaskingRoutine = new Gcp.BigQuery.Routine("custom_masking_routine", new()
    {
        DatasetId = test.DatasetId,
        RoutineId = "custom_masking_routine",
        RoutineType = "SCALAR_FUNCTION",
        Language = "SQL",
        DataGovernanceType = "DATA_MASKING",
        DefinitionBody = "SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')",
        Arguments = new[]
        {
            new Gcp.BigQuery.Inputs.RoutineArgumentArgs
            {
                Name = "ssn",
                DataType = "{\"typeKind\" :  \"STRING\"}",
            },
        },
        ReturnType = "{\"typeKind\" :  \"STRING\"}",
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Routine;
import com.pulumi.gcp.bigquery.RoutineArgs;
import com.pulumi.gcp.bigquery.inputs.RoutineArgumentArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var test = new Dataset("test", DatasetArgs.builder()
            .datasetId("tf_test_dataset_id_81126")
            .build());
        var customMaskingRoutine = new Routine("customMaskingRoutine", RoutineArgs.builder()
            .datasetId(test.datasetId())
            .routineId("custom_masking_routine")
            .routineType("SCALAR_FUNCTION")
            .language("SQL")
            .dataGovernanceType("DATA_MASKING")
            .definitionBody("SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')")
            .arguments(RoutineArgumentArgs.builder()
                .name("ssn")
                .dataType("{\"typeKind\" :  \"STRING\"}")
                .build())
            .returnType("{\"typeKind\" :  \"STRING\"}")
            .build());
    }
}
resources:
  test:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: tf_test_dataset_id_81126
  customMaskingRoutine:
    type: gcp:bigquery:Routine
    name: custom_masking_routine
    properties:
      datasetId: ${test.datasetId}
      routineId: custom_masking_routine
      routineType: SCALAR_FUNCTION
      language: SQL
      dataGovernanceType: DATA_MASKING
      definitionBody: SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')
      arguments:
        - name: ssn
          dataType: '{"typeKind" :  "STRING"}'
      returnType: '{"typeKind" :  "STRING"}'
Bigquery Routine Remote Function
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";
const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
const testConnection = new gcp.bigquery.Connection("test", {
    connectionId: "connection_id",
    location: "US",
    cloudResource: {},
});
const remoteFunction = new gcp.bigquery.Routine("remote_function", {
    datasetId: test.datasetId,
    routineId: "routine_id",
    routineType: "SCALAR_FUNCTION",
    definitionBody: "",
    returnType: "{\"typeKind\" :  \"STRING\"}",
    remoteFunctionOptions: {
        endpoint: "https://us-east1-my_gcf_project.cloudfunctions.net/remote_add",
        connection: testConnection.name,
        maxBatchingRows: "10",
        userDefinedContext: {
            z: "1.5",
        },
    },
});
import pulumi
import pulumi_gcp as gcp
test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
test_connection = gcp.bigquery.Connection("test",
    connection_id="connection_id",
    location="US",
    cloud_resource={})
remote_function = gcp.bigquery.Routine("remote_function",
    dataset_id=test.dataset_id,
    routine_id="routine_id",
    routine_type="SCALAR_FUNCTION",
    definition_body="",
    return_type="{\"typeKind\" :  \"STRING\"}",
    remote_function_options={
        "endpoint": "https://us-east1-my_gcf_project.cloudfunctions.net/remote_add",
        "connection": test_connection.name,
        "max_batching_rows": "10",
        "user_defined_context": {
            "z": "1.5",
        },
    })
package main
import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
			DatasetId: pulumi.String("dataset_id"),
		})
		if err != nil {
			return err
		}
		testConnection, err := bigquery.NewConnection(ctx, "test", &bigquery.ConnectionArgs{
			ConnectionId:  pulumi.String("connection_id"),
			Location:      pulumi.String("US"),
			CloudResource: &bigquery.ConnectionCloudResourceArgs{},
		})
		if err != nil {
			return err
		}
		_, err = bigquery.NewRoutine(ctx, "remote_function", &bigquery.RoutineArgs{
			DatasetId:      test.DatasetId,
			RoutineId:      pulumi.String("routine_id"),
			RoutineType:    pulumi.String("SCALAR_FUNCTION"),
			DefinitionBody: pulumi.String(""),
			ReturnType:     pulumi.String("{\"typeKind\" :  \"STRING\"}"),
			RemoteFunctionOptions: &bigquery.RoutineRemoteFunctionOptionsArgs{
				Endpoint:        pulumi.String("https://us-east1-my_gcf_project.cloudfunctions.net/remote_add"),
				Connection:      testConnection.Name,
				MaxBatchingRows: pulumi.String("10"),
				UserDefinedContext: pulumi.StringMap{
					"z": pulumi.String("1.5"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;
return await Deployment.RunAsync(() => 
{
    var test = new Gcp.BigQuery.Dataset("test", new()
    {
        DatasetId = "dataset_id",
    });
    var testConnection = new Gcp.BigQuery.Connection("test", new()
    {
        ConnectionId = "connection_id",
        Location = "US",
        CloudResource = null,
    });
    var remoteFunction = new Gcp.BigQuery.Routine("remote_function", new()
    {
        DatasetId = test.DatasetId,
        RoutineId = "routine_id",
        RoutineType = "SCALAR_FUNCTION",
        DefinitionBody = "",
        ReturnType = "{\"typeKind\" :  \"STRING\"}",
        RemoteFunctionOptions = new Gcp.BigQuery.Inputs.RoutineRemoteFunctionOptionsArgs
        {
            Endpoint = "https://us-east1-my_gcf_project.cloudfunctions.net/remote_add",
            Connection = testConnection.Name,
            MaxBatchingRows = "10",
            UserDefinedContext = 
            {
                { "z", "1.5" },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.gcp.bigquery.Dataset;
import com.pulumi.gcp.bigquery.DatasetArgs;
import com.pulumi.gcp.bigquery.Connection;
import com.pulumi.gcp.bigquery.ConnectionArgs;
import com.pulumi.gcp.bigquery.inputs.ConnectionCloudResourceArgs;
import com.pulumi.gcp.bigquery.Routine;
import com.pulumi.gcp.bigquery.RoutineArgs;
import com.pulumi.gcp.bigquery.inputs.RoutineRemoteFunctionOptionsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var test = new Dataset("test", DatasetArgs.builder()
            .datasetId("dataset_id")
            .build());
        var testConnection = new Connection("testConnection", ConnectionArgs.builder()
            .connectionId("connection_id")
            .location("US")
            .cloudResource(ConnectionCloudResourceArgs.builder().build())
            .build());
        var remoteFunction = new Routine("remoteFunction", RoutineArgs.builder()
            .datasetId(test.datasetId())
            .routineId("routine_id")
            .routineType("SCALAR_FUNCTION")
            .definitionBody("")
            .returnType("{\"typeKind\" :  \"STRING\"}")
            .remoteFunctionOptions(RoutineRemoteFunctionOptionsArgs.builder()
                .endpoint("https://us-east1-my_gcf_project.cloudfunctions.net/remote_add")
                .connection(testConnection.name())
                .maxBatchingRows("10")
                .userDefinedContext(Map.of("z", "1.5"))
                .build())
            .build());
    }
}
resources:
  test:
    type: gcp:bigquery:Dataset
    properties:
      datasetId: dataset_id
  testConnection:
    type: gcp:bigquery:Connection
    name: test
    properties:
      connectionId: connection_id
      location: US
      cloudResource: {}
  remoteFunction:
    type: gcp:bigquery:Routine
    name: remote_function
    properties:
      datasetId: ${test.datasetId}
      routineId: routine_id
      routineType: SCALAR_FUNCTION
      definitionBody: ""
      returnType: '{"typeKind" :  "STRING"}'
      remoteFunctionOptions:
        endpoint: https://us-east1-my_gcf_project.cloudfunctions.net/remote_add
        connection: ${testConnection.name}
        maxBatchingRows: '10'
        userDefinedContext:
          z: '1.5'
Create Routine Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new Routine(name: string, args: RoutineArgs, opts?: CustomResourceOptions);
@overload
def Routine(resource_name: str,
            args: RoutineArgs,
            opts: Optional[ResourceOptions] = None)
@overload
def Routine(resource_name: str,
            opts: Optional[ResourceOptions] = None,
            routine_id: Optional[str] = None,
            routine_type: Optional[str] = None,
            dataset_id: Optional[str] = None,
            definition_body: Optional[str] = None,
            imported_libraries: Optional[Sequence[str]] = None,
            determinism_level: Optional[str] = None,
            arguments: Optional[Sequence[RoutineArgumentArgs]] = None,
            language: Optional[str] = None,
            project: Optional[str] = None,
            remote_function_options: Optional[RoutineRemoteFunctionOptionsArgs] = None,
            return_table_type: Optional[str] = None,
            return_type: Optional[str] = None,
            description: Optional[str] = None,
            data_governance_type: Optional[str] = None,
            spark_options: Optional[RoutineSparkOptionsArgs] = None)
func NewRoutine(ctx *Context, name string, args RoutineArgs, opts ...ResourceOption) (*Routine, error)
public Routine(string name, RoutineArgs args, CustomResourceOptions? opts = null)
public Routine(String name, RoutineArgs args)
public Routine(String name, RoutineArgs args, CustomResourceOptions options)
type: gcp:bigquery:Routine
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args RoutineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args RoutineArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args RoutineArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args RoutineArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args RoutineArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
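The opts / options parameter is the standard Pulumi resource options bag, passed as the third constructor argument. As a hedged illustration in TypeScript (the explicit dependsOn is redundant here because referencing dataset.datasetId already records the dependency, and protect is shown purely as an example):
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

const dataset = new gcp.bigquery.Dataset("dataset", {datasetId: "dataset_id"});
const routine = new gcp.bigquery.Routine("routine", {
    datasetId: dataset.datasetId,
    routineId: "routine_id",
    routineType: "PROCEDURE",
    language: "SQL",
    definitionBody: "CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);",
}, {
    dependsOn: [dataset], // explicit ordering; already implied by the datasetId reference
    protect: true,        // refuse to delete this routine until protection is removed
});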
Constructor example
The following reference example uses placeholder values for all input properties.
var routineResource = new Gcp.BigQuery.Routine("routineResource", new()
{
    RoutineId = "string",
    RoutineType = "string",
    DatasetId = "string",
    DefinitionBody = "string",
    ImportedLibraries = new[]
    {
        "string",
    },
    DeterminismLevel = "string",
    Arguments = new[]
    {
        new Gcp.BigQuery.Inputs.RoutineArgumentArgs
        {
            ArgumentKind = "string",
            DataType = "string",
            Mode = "string",
            Name = "string",
        },
    },
    Language = "string",
    Project = "string",
    RemoteFunctionOptions = new Gcp.BigQuery.Inputs.RoutineRemoteFunctionOptionsArgs
    {
        Connection = "string",
        Endpoint = "string",
        MaxBatchingRows = "string",
        UserDefinedContext = 
        {
            { "string", "string" },
        },
    },
    ReturnTableType = "string",
    ReturnType = "string",
    Description = "string",
    DataGovernanceType = "string",
    SparkOptions = new Gcp.BigQuery.Inputs.RoutineSparkOptionsArgs
    {
        ArchiveUris = new[]
        {
            "string",
        },
        Connection = "string",
        ContainerImage = "string",
        FileUris = new[]
        {
            "string",
        },
        JarUris = new[]
        {
            "string",
        },
        MainClass = "string",
        MainFileUri = "string",
        Properties = 
        {
            { "string", "string" },
        },
        PyFileUris = new[]
        {
            "string",
        },
        RuntimeVersion = "string",
    },
});
example, err := bigquery.NewRoutine(ctx, "routineResource", &bigquery.RoutineArgs{
	RoutineId:      pulumi.String("string"),
	RoutineType:    pulumi.String("string"),
	DatasetId:      pulumi.String("string"),
	DefinitionBody: pulumi.String("string"),
	ImportedLibraries: pulumi.StringArray{
		pulumi.String("string"),
	},
	DeterminismLevel: pulumi.String("string"),
	Arguments: bigquery.RoutineArgumentArray{
		&bigquery.RoutineArgumentArgs{
			ArgumentKind: pulumi.String("string"),
			DataType:     pulumi.String("string"),
			Mode:         pulumi.String("string"),
			Name:         pulumi.String("string"),
		},
	},
	Language: pulumi.String("string"),
	Project:  pulumi.String("string"),
	RemoteFunctionOptions: &bigquery.RoutineRemoteFunctionOptionsArgs{
		Connection:      pulumi.String("string"),
		Endpoint:        pulumi.String("string"),
		MaxBatchingRows: pulumi.String("string"),
		UserDefinedContext: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
	},
	ReturnTableType:    pulumi.String("string"),
	ReturnType:         pulumi.String("string"),
	Description:        pulumi.String("string"),
	DataGovernanceType: pulumi.String("string"),
	SparkOptions: &bigquery.RoutineSparkOptionsArgs{
		ArchiveUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		Connection:     pulumi.String("string"),
		ContainerImage: pulumi.String("string"),
		FileUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		JarUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		MainClass:   pulumi.String("string"),
		MainFileUri: pulumi.String("string"),
		Properties: pulumi.StringMap{
			"string": pulumi.String("string"),
		},
		PyFileUris: pulumi.StringArray{
			pulumi.String("string"),
		},
		RuntimeVersion: pulumi.String("string"),
	},
})
var routineResource = new Routine("routineResource", RoutineArgs.builder()
    .routineId("string")
    .routineType("string")
    .datasetId("string")
    .definitionBody("string")
    .importedLibraries("string")
    .determinismLevel("string")
    .arguments(RoutineArgumentArgs.builder()
        .argumentKind("string")
        .dataType("string")
        .mode("string")
        .name("string")
        .build())
    .language("string")
    .project("string")
    .remoteFunctionOptions(RoutineRemoteFunctionOptionsArgs.builder()
        .connection("string")
        .endpoint("string")
        .maxBatchingRows("string")
        .userDefinedContext(Map.of("string", "string"))
        .build())
    .returnTableType("string")
    .returnType("string")
    .description("string")
    .dataGovernanceType("string")
    .sparkOptions(RoutineSparkOptionsArgs.builder()
        .archiveUris("string")
        .connection("string")
        .containerImage("string")
        .fileUris("string")
        .jarUris("string")
        .mainClass("string")
        .mainFileUri("string")
        .properties(Map.of("string", "string"))
        .pyFileUris("string")
        .runtimeVersion("string")
        .build())
    .build());
routine_resource = gcp.bigquery.Routine("routineResource",
    routine_id="string",
    routine_type="string",
    dataset_id="string",
    definition_body="string",
    imported_libraries=["string"],
    determinism_level="string",
    arguments=[{
        "argument_kind": "string",
        "data_type": "string",
        "mode": "string",
        "name": "string",
    }],
    language="string",
    project="string",
    remote_function_options={
        "connection": "string",
        "endpoint": "string",
        "max_batching_rows": "string",
        "user_defined_context": {
            "string": "string",
        },
    },
    return_table_type="string",
    return_type="string",
    description="string",
    data_governance_type="string",
    spark_options={
        "archive_uris": ["string"],
        "connection": "string",
        "container_image": "string",
        "file_uris": ["string"],
        "jar_uris": ["string"],
        "main_class": "string",
        "main_file_uri": "string",
        "properties": {
            "string": "string",
        },
        "py_file_uris": ["string"],
        "runtime_version": "string",
    })
const routineResource = new gcp.bigquery.Routine("routineResource", {
    routineId: "string",
    routineType: "string",
    datasetId: "string",
    definitionBody: "string",
    importedLibraries: ["string"],
    determinismLevel: "string",
    arguments: [{
        argumentKind: "string",
        dataType: "string",
        mode: "string",
        name: "string",
    }],
    language: "string",
    project: "string",
    remoteFunctionOptions: {
        connection: "string",
        endpoint: "string",
        maxBatchingRows: "string",
        userDefinedContext: {
            string: "string",
        },
    },
    returnTableType: "string",
    returnType: "string",
    description: "string",
    dataGovernanceType: "string",
    sparkOptions: {
        archiveUris: ["string"],
        connection: "string",
        containerImage: "string",
        fileUris: ["string"],
        jarUris: ["string"],
        mainClass: "string",
        mainFileUri: "string",
        properties: {
            string: "string",
        },
        pyFileUris: ["string"],
        runtimeVersion: "string",
    },
});
type: gcp:bigquery:Routine
properties:
    arguments:
        - argumentKind: string
          dataType: string
          mode: string
          name: string
    dataGovernanceType: string
    datasetId: string
    definitionBody: string
    description: string
    determinismLevel: string
    importedLibraries:
        - string
    language: string
    project: string
    remoteFunctionOptions:
        connection: string
        endpoint: string
        maxBatchingRows: string
        userDefinedContext:
            string: string
    returnTableType: string
    returnType: string
    routineId: string
    routineType: string
    sparkOptions:
        archiveUris:
            - string
        connection: string
        containerImage: string
        fileUris:
            - string
        jarUris:
            - string
        mainClass: string
        mainFileUri: string
        properties:
            string: string
        pyFileUris:
            - string
        runtimeVersion: string
Routine Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The Routine resource accepts the following input properties:
- DatasetId string
- The ID of the dataset containing this routine
- DefinitionBody string
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- RoutineId string
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- RoutineType string
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- Arguments
List<RoutineArgument> 
- Input/output argument of a function or a stored procedure. Structure is documented below.
- DataGovernanceType string
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- Description string
- The description of the routine if defined.
- DeterminismLevel string
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- ImportedLibraries List<string>
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- Language string
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- RemoteFunctionOptions RoutineRemoteFunctionOptions
- Remote function specific options. Structure is documented below.
- ReturnTableType string
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- ReturnType string
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- SparkOptions RoutineSparkOptions
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
- DatasetId string
- The ID of the dataset containing this routine
- DefinitionBody string
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- RoutineId string
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- RoutineType string
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- Arguments
[]RoutineArgument Args 
- Input/output argument of a function or a stored procedure. Structure is documented below.
- DataGovernanceType string
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- Description string
- The description of the routine if defined.
- DeterminismLevel string
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- ImportedLibraries []string
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- Language string
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- RemoteFunctionOptions RoutineRemoteFunctionOptionsArgs
- Remote function specific options. Structure is documented below.
- ReturnTableType string
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- ReturnType string
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- SparkOptions RoutineSparkOptionsArgs
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
- datasetId String
- The ID of the dataset containing this routine
- definitionBody String
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- routineId String
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- routineType String
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- arguments
List<RoutineArgument> 
- Input/output argument of a function or a stored procedure. Structure is documented below.
- dataGovernanceType String
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- description String
- The description of the routine if defined.
- determinismLevel String
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- importedLibraries List<String>
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- language String
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- remoteFunctionOptions RoutineRemoteFunctionOptions
- Remote function specific options. Structure is documented below.
- returnTableType String
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- returnType String
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- sparkOptions RoutineSparkOptions
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
- datasetId string
- The ID of the dataset containing this routine
- definitionBody string
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- routineId string
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- routineType string
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- arguments
RoutineArgument[] 
- Input/output argument of a function or a stored procedure. Structure is documented below.
- dataGovernanceType string
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- description string
- The description of the routine if defined.
- determinismLevel string
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- importedLibraries string[]
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- language string
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- remoteFunctionOptions RoutineRemoteFunctionOptions
- Remote function specific options. Structure is documented below.
- returnTableType string
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- returnType string
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- sparkOptions RoutineSparkOptions
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
- dataset_id str
- The ID of the dataset containing this routine
- definition_body str
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- routine_id str
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- routine_type str
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- arguments
Sequence[RoutineArgument Args] 
- Input/output argument of a function or a stored procedure. Structure is documented below.
- data_governance_type str
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- description str
- The description of the routine if defined.
- determinism_level str
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- imported_libraries Sequence[str]
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- language str
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- remote_function_options RoutineRemoteFunctionOptionsArgs
- Remote function specific options. Structure is documented below.
- return_table_type str
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- return_type str
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- spark_options RoutineSparkOptionsArgs
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
- datasetId String
- The ID of the dataset containing this routine
- definitionBody String
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- routineId String
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- routineType String
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- arguments List<Property Map>
- Input/output argument of a function or a stored procedure. Structure is documented below.
- dataGovernanceType String
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- description String
- The description of the routine if defined.
- determinismLevel String
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- importedLibraries List<String>
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- language String
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- remoteFunctionOptions Property Map
- Remote function specific options. Structure is documented below.
- returnTableType String
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- returnType String
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- sparkOptions Property Map
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
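As a concrete illustration of how these inputs fit together, here is a minimal TypeScript sketch of a scalar SQL function with typed arguments and an explicit return type. The dataset ID, routine ID, and the StandardSqlDataType-style JSON used for dataType and returnType are illustrative placeholders, not values the provider requires.
import * as gcp from "@pulumi/gcp";

// Hypothetical scalar SQL UDF; all IDs and schema JSON below are placeholders.
const addFn = new gcp.bigquery.Routine("addFn", {
    datasetId: "dataset_id",
    routineId: "add_fn",
    routineType: "SCALAR_FUNCTION",
    language: "SQL",
    // For SQL functions, definitionBody is only the expression from the AS clause.
    definitionBody: "x + y",
    arguments: [
        { name: "x", argumentKind: "FIXED_TYPE", dataType: JSON.stringify({ typeKind: "INT64" }) },
        { name: "y", argumentKind: "FIXED_TYPE", dataType: JSON.stringify({ typeKind: "INT64" }) },
    ],
    // returnType also takes a JSON string; reusing the schema text the API returns avoids recurring diffs.
    returnType: JSON.stringify({ typeKind: "INT64" }),
});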
Outputs
All input properties are implicitly available as output properties. Additionally, the Routine resource produces the following output properties:
- CreationTime int
- The time when this routine was created, in milliseconds since the epoch.
- Id string
- The provider-assigned unique ID for this managed resource.
- LastModifiedTime int
- The time when this routine was modified, in milliseconds since the epoch.
- CreationTime int
- The time when this routine was created, in milliseconds since the epoch.
- Id string
- The provider-assigned unique ID for this managed resource.
- LastModifiedTime int
- The time when this routine was modified, in milliseconds since the epoch.
- creationTime Integer
- The time when this routine was created, in milliseconds since the epoch.
- id String
- The provider-assigned unique ID for this managed resource.
- lastModifiedTime Integer
- The time when this routine was modified, in milliseconds since the epoch.
- creationTime number
- The time when this routine was created, in milliseconds since the epoch.
- id string
- The provider-assigned unique ID for this managed resource.
- lastModifiedTime number
- The time when this routine was modified, in milliseconds since the epoch.
- creation_time int
- The time when this routine was created, in milliseconds since the epoch.
- id str
- The provider-assigned unique ID for this managed resource.
- last_modified_time int
- The time when this routine was modified, in milliseconds since the epoch.
- creationTime Number
- The time when this routine was created, in milliseconds since the epoch.
- id String
- The provider-assigned unique ID for this managed resource.
- lastModifiedTime Number
- The time when this routine was modified, in milliseconds since the epoch.
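These output properties can be consumed like any other Pulumi outputs. A minimal TypeScript sketch, with placeholder IDs and a trivial procedure body:
import * as gcp from "@pulumi/gcp";

// Placeholder routine; any Routine resource in the program exposes the same outputs.
const routine = new gcp.bigquery.Routine("routine", {
    datasetId: "dataset_id",
    routineId: "routine_id",
    routineType: "PROCEDURE",
    language: "SQL",
    definitionBody: "SELECT 1;",
});

// Provider-computed outputs become available once the resource is created.
export const routineCreationTime = routine.creationTime;         // milliseconds since epoch
export const routineLastModifiedTime = routine.lastModifiedTime; // milliseconds since epoch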
Look up Existing Routine Resource
Get an existing Routine resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: RoutineState, opts?: CustomResourceOptions): Routine
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        arguments: Optional[Sequence[RoutineArgumentArgs]] = None,
        creation_time: Optional[int] = None,
        data_governance_type: Optional[str] = None,
        dataset_id: Optional[str] = None,
        definition_body: Optional[str] = None,
        description: Optional[str] = None,
        determinism_level: Optional[str] = None,
        imported_libraries: Optional[Sequence[str]] = None,
        language: Optional[str] = None,
        last_modified_time: Optional[int] = None,
        project: Optional[str] = None,
        remote_function_options: Optional[RoutineRemoteFunctionOptionsArgs] = None,
        return_table_type: Optional[str] = None,
        return_type: Optional[str] = None,
        routine_id: Optional[str] = None,
        routine_type: Optional[str] = None,
        spark_options: Optional[RoutineSparkOptionsArgs] = None) -> Routine
func GetRoutine(ctx *Context, name string, id IDInput, state *RoutineState, opts ...ResourceOption) (*Routine, error)
public static Routine Get(string name, Input<string> id, RoutineState? state, CustomResourceOptions? opts = null)
public static Routine get(String name, Output<String> id, RoutineState state, CustomResourceOptions options)
resources:
  _:
    type: gcp:bigquery:Routine
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
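For instance, a minimal TypeScript sketch of such a lookup. The ID string is a hypothetical example of the provider-assigned ID (following the projects/{project}/datasets/{dataset}/routines/{routine} pattern used when importing this resource); substitute real values.
import * as gcp from "@pulumi/gcp";

// Look up an existing routine by its provider-assigned ID (placeholder values shown).
const existing = gcp.bigquery.Routine.get(
    "existingRoutine",
    "projects/my-project/datasets/dataset_id/routines/routine_id",
);

// The looked-up resource exposes the same output properties as one created in this program.
export const existingDefinitionBody = existing.definitionBody;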
- Arguments
List<RoutineArgument> 
- Input/output argument of a function or a stored procedure. Structure is documented below.
- CreationTime int
- The time when this routine was created, in milliseconds since the epoch.
- DataGovernanceType string
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- DatasetId string
- The ID of the dataset containing this routine
- DefinitionBody string
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- Description string
- The description of the routine if defined.
- DeterminismLevel string
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- ImportedLibraries List<string>
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- Language string
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- LastModifiedTime int
- The time when this routine was modified, in milliseconds since the epoch.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- RemoteFunctionOptions RoutineRemoteFunctionOptions
- Remote function specific options. Structure is documented below.
- ReturnTableType string
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- ReturnType string
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- RoutineId string
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- RoutineType string
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- SparkOptions RoutineSparkOptions
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
- Arguments
[]RoutineArgument Args 
- Input/output argument of a function or a stored procedure. Structure is documented below.
- CreationTime int
- The time when this routine was created, in milliseconds since the epoch.
- DataGovernanceType string
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- DatasetId string
- The ID of the dataset containing this routine
- DefinitionBody string
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- Description string
- The description of the routine if defined.
- DeterminismLevel string
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- ImportedLibraries []string
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- Language string
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- LastModifiedTime int
- The time when this routine was modified, in milliseconds since the epoch.
- Project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- RemoteFunctionOptions RoutineRemoteFunctionOptionsArgs
- Remote function specific options. Structure is documented below.
- ReturnTableType string
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- ReturnType string
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- RoutineId string
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- RoutineType string
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- SparkOptions RoutineSparkOptionsArgs
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
- arguments
List<RoutineArgument> 
- Input/output argument of a function or a stored procedure. Structure is documented below.
- creationTime Integer
- The time when this routine was created, in milliseconds since the epoch.
- dataGovernanceType String
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- datasetId String
- The ID of the dataset containing this routine
- definitionBody String
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- description String
- The description of the routine if defined.
- determinismLevel String
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- importedLibraries List<String>
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- language String
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- lastModifiedTime Integer
- The time when this routine was modified, in milliseconds since the epoch.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- remoteFunctionOptions RoutineRemoteFunctionOptions
- Remote function specific options. Structure is documented below.
- returnTableType String
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- returnType String
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- routineId String
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- routineType String
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- sparkOptions RoutineSparkOptions
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
- arguments
RoutineArgument[] 
- Input/output argument of a function or a stored procedure. Structure is documented below.
- creationTime number
- The time when this routine was created, in milliseconds since the epoch.
- dataGovernanceType string
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- datasetId string
- The ID of the dataset containing this routine
- definitionBody string
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- description string
- The description of the routine if defined.
- determinismLevel string
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- importedLibraries string[]
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- language string
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- lastModifiedTime number
- The time when this routine was modified, in milliseconds since the epoch.
- project string
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- remoteFunctionOptions RoutineRemoteFunctionOptions
- Remote function specific options. Structure is documented below.
- returnTableType string
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- returnType string
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- routineId string
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- routineType string
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- sparkOptions RoutineSparkOptions
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
- arguments
Sequence[RoutineArgument Args] 
- Input/output argument of a function or a stored procedure. Structure is documented below.
- creation_time int
- The time when this routine was created, in milliseconds since the epoch.
- data_governance_type str
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- dataset_id str
- The ID of the dataset containing this routine
- definition_body str
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- description str
- The description of the routine if defined.
- determinism_level str
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- imported_libraries Sequence[str]
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- language str
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- last_modified_time int
- The time when this routine was modified, in milliseconds since the epoch.
- project str
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- remote_function_options RoutineRemoteFunctionOptionsArgs
- Remote function specific options. Structure is documented below.
- return_table_type str
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- return_type str
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- routine_id str
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- routine_type str
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- spark_options RoutineSparkOptionsArgs
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
- arguments List<Property Map>
- Input/output argument of a function or a stored procedure. Structure is documented below.
- creationTime Number
- The time when this routine was created, in milliseconds since the epoch.
- dataGovernanceType String
- If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
Possible values are: DATA_MASKING.
- datasetId String
- The ID of the dataset containing this routine
- definitionBody String
- The body of the routine. For functions, this is the expression in the AS clause.
If language=SQL, it is the substring inside (but excluding) the parentheses.
- description String
- The description of the routine if defined.
- determinismLevel String
- The determinism level of the JavaScript UDF if defined.
Possible values are: DETERMINISM_LEVEL_UNSPECIFIED,DETERMINISTIC,NOT_DETERMINISTIC.
- importedLibraries List<String>
- Optional. If language = "JAVASCRIPT", this field stores the path of the imported JAVASCRIPT libraries.
- language String
- The language of the routine.
Possible values are: SQL,JAVASCRIPT,PYTHON,JAVA,SCALA.
- lastModifiedTime Number
- The time when this routine was modified, in milliseconds since the epoch.
- project String
- The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
- remoteFunctionOptions Property Map
- Remote function specific options. Structure is documented below.
- returnTableType String
- Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definitionBody at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specified in return table type, at query time.
- returnType String
- A JSON schema for the return type. Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definitionBody at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- routineId String
- The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
- routineType String
- The type of routine.
Possible values are: SCALAR_FUNCTION,PROCEDURE,TABLE_VALUED_FUNCTION.
- sparkOptions Property Map
- Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. Structure is documented below.
Supporting Types
RoutineArgument, RoutineArgumentArgs    
- ArgumentKind string
- Defaults to FIXED_TYPE.
Default value is FIXED_TYPE. Possible values are: FIXED_TYPE,ANY_TYPE.
- DataType string
- A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- Mode string
- Specifies whether the argument is input or output. Can be set for procedures only.
Possible values are: IN,OUT,INOUT.
- Name string
- The name of this argument. Can be absent for function return argument.
- ArgumentKind string
- Defaults to FIXED_TYPE.
Default value is FIXED_TYPE. Possible values are: FIXED_TYPE,ANY_TYPE.
- DataType string
- A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- Mode string
- Specifies whether the argument is input or output. Can be set for procedures only.
Possible values are: IN,OUT,INOUT.
- Name string
- The name of this argument. Can be absent for function return argument.
- argumentKind String
- Defaults to FIXED_TYPE.
Default value is FIXED_TYPE. Possible values are: FIXED_TYPE,ANY_TYPE.
- dataType String
- A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- mode String
- Specifies whether the argument is input or output. Can be set for procedures only.
Possible values are: IN,OUT,INOUT.
- name String
- The name of this argument. Can be absent for function return argument.
- argumentKind string
- Defaults to FIXED_TYPE.
Default value is FIXED_TYPE. Possible values are: FIXED_TYPE,ANY_TYPE.
- dataType string
- A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- mode string
- Specifies whether the argument is input or output. Can be set for procedures only.
Possible values are: IN,OUT,INOUT.
- name string
- The name of this argument. Can be absent for function return argument.
- argument_kind str
- Defaults to FIXED_TYPE.
Default value is FIXED_TYPE. Possible values are: FIXED_TYPE,ANY_TYPE.
- data_type str
- A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- mode str
- Specifies whether the argument is input or output. Can be set for procedures only.
Possible values are: IN,OUT,INOUT.
- name str
- The name of this argument. Can be absent for function return argument.
- argumentKind String
- Defaults to FIXED_TYPE.
Default value is FIXED_TYPE. Possible values are: FIXED_TYPE,ANY_TYPE.
- dataType String
- A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. ~>NOTE: Because this field expects a JSON string, any changes to the string will create a diff, even if the JSON itself hasn't changed. If the API returns a different value for the same schema, e.g. it switched the order of values or replaced STRUCT field type with RECORD field type, we currently cannot suppress the recurring diff this causes. As a workaround, we recommend using the schema as returned by the API.
- mode String
- Specifies whether the argument is input or output. Can be set for procedures only.
Possible values are: IN,OUT,INOUT.
- name String
- The name of this argument. Can be absent for function return argument.
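Since mode can only be set for procedures, here is a hedged TypeScript sketch of a stored procedure that declares IN and OUT arguments. The dataset/routine IDs are placeholders, and it assumes that, for procedures, definitionBody holds the procedure's statements (without BEGIN/END); adjust the body to your actual procedure.
import * as gcp from "@pulumi/gcp";

// Hypothetical stored procedure with IN/OUT arguments (mode is only valid for procedures).
const doubler = new gcp.bigquery.Routine("doubler", {
    datasetId: "dataset_id",
    routineId: "doubler",
    routineType: "PROCEDURE",
    language: "SQL",
    // Assumption: for procedures, definitionBody carries the body statements.
    definitionBody: "SET y = x * 2;",
    arguments: [
        { name: "x", mode: "IN", dataType: JSON.stringify({ typeKind: "INT64" }) },
        { name: "y", mode: "OUT", dataType: JSON.stringify({ typeKind: "INT64" }) },
    ],
});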
RoutineRemoteFunctionOptions, RoutineRemoteFunctionOptionsArgs        
- Connection string
- Fully qualified name of the user-provided connection object which holds the authentication information to send requests to the remote service. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- Endpoint string
- Endpoint of the user-provided remote service, e.g.
https://us-east1-my_gcf_project.cloudfunctions.net/remote_add
- MaxBatchingRows string
- Max number of rows in each batch sent to the remote service. If absent or if 0, BigQuery dynamically decides the number of rows in a batch.
- UserDefinedContext Dictionary<string, string>
- User-defined context as a set of key/value pairs, which will be sent as function
invocation context together with batched arguments in the requests to the remote
service. The total number of bytes of keys and values must be less than 8KB.
An object containing a list of "key": value pairs. Example:
{ "name": "wrench", "mass": "1.3kg", "count": "3" }.
- Connection string
- Fully qualified name of the user-provided connection object which holds the authentication information to send requests to the remote service. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- Endpoint string
- Endpoint of the user-provided remote service, e.g.
https://us-east1-my_gcf_project.cloudfunctions.net/remote_add
- MaxBatchingRows string
- Max number of rows in each batch sent to the remote service. If absent or if 0, BigQuery dynamically decides the number of rows in a batch.
- UserDefinedContext map[string]string
- User-defined context as a set of key/value pairs, which will be sent as function
invocation context together with batched arguments in the requests to the remote
service. The total number of bytes of keys and values must be less than 8KB.
An object containing a list of "key": value pairs. Example:
{ "name": "wrench", "mass": "1.3kg", "count": "3" }.
- connection String
- Fully qualified name of the user-provided connection object which holds the authentication information to send requests to the remote service. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- endpoint String
- Endpoint of the user-provided remote service, e.g.
https://us-east1-my_gcf_project.cloudfunctions.net/remote_add
- maxBatchingRows String
- Max number of rows in each batch sent to the remote service. If absent or if 0, BigQuery dynamically decides the number of rows in a batch.
- userDefinedContext Map<String,String>
- User-defined context as a set of key/value pairs, which will be sent as function
invocation context together with batched arguments in the requests to the remote
service. The total number of bytes of keys and values must be less than 8KB.
An object containing a list of "key": value pairs. Example:
{ "name": "wrench", "mass": "1.3kg", "count": "3" }.
- connection string
- Fully qualified name of the user-provided connection object which holds the authentication information to send requests to the remote service. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- endpoint string
- Endpoint of the user-provided remote service, e.g.
https://us-east1-my_gcf_project.cloudfunctions.net/remote_add
- maxBatchingRows string
- Max number of rows in each batch sent to the remote service. If absent or if 0, BigQuery dynamically decides the number of rows in a batch.
- userDefinedContext {[key: string]: string}
- User-defined context as a set of key/value pairs, which will be sent as function
invocation context together with batched arguments in the requests to the remote
service. The total number of bytes of keys and values must be less than 8KB.
An object containing a list of "key": value pairs. Example:
{ "name": "wrench", "mass": "1.3kg", "count": "3" }.
- connection str
- Fully qualified name of the user-provided connection object which holds the authentication information to send requests to the remote service. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- endpoint str
- Endpoint of the user-provided remote service, e.g.
https://us-east1-my_gcf_project.cloudfunctions.net/remote_add
- max_batching_rows str
- Max number of rows in each batch sent to the remote service. If absent or if 0, BigQuery dynamically decides the number of rows in a batch.
- user_defined_context Mapping[str, str]
- User-defined context as a set of key/value pairs, which will be sent as function
invocation context together with batched arguments in the requests to the remote
service. The total number of bytes of keys and values must be less than 8KB.
An object containing a list of "key": value pairs. Example:
{ "name": "wrench", "mass": "1.3kg", "count": "3" }.
- connection String
- Fully qualified name of the user-provided connection object which holds the authentication information to send requests to the remote service. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- endpoint String
- Endpoint of the user-provided remote service, e.g.
https://us-east1-my_gcf_project.cloudfunctions.net/remote_add
- maxBatchingRows String
- Max number of rows in each batch sent to the remote service. If absent or if 0, BigQuery dynamically decides the number of rows in a batch.
- userDefinedContext Map<String>
- User-defined context as a set of key/value pairs, which will be sent as function
invocation context together with batched arguments in the requests to the remote
service. The total number of bytes of keys and values must be less than 8KB.
An object containing a list of "key": value pairs. Example:
{ "name": "wrench", "mass": "1.3kg", "count": "3" }.
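Tying these fields together, here is a hedged TypeScript sketch of a remote function. The connection, dataset/routine IDs, endpoint URL, context values, and the StandardSqlDataType-style returnType JSON are assumptions to replace with real values.
import * as gcp from "@pulumi/gcp";

// Placeholder BigQuery connection; its fully qualified name feeds remoteFunctionOptions.connection.
const conn = new gcp.bigquery.Connection("remoteConn", {
    connectionId: "connection_id",
    location: "US",
    cloudResource: {},
});

const remoteAdd = new gcp.bigquery.Routine("remoteAdd", {
    datasetId: "dataset_id",
    routineId: "remote_add",
    routineType: "SCALAR_FUNCTION",
    definitionBody: "",
    returnType: JSON.stringify({ typeKind: "INT64" }),
    remoteFunctionOptions: {
        connection: conn.name,
        // Placeholder endpoint; point this at a real Cloud Functions or Cloud Run URL.
        endpoint: "https://us-east1-my_gcf_project.cloudfunctions.net/remote_add",
        maxBatchingRows: "10",
        userDefinedContext: {
            mode: "add",
        },
    },
});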
RoutineSparkOptions, RoutineSparkOptionsArgs      
- ArchiveUris List<string>
- Archive files to be extracted into the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- Connection string
- Fully qualified name of the user-provided Spark connection object. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- ContainerImage string
- Custom container image for the runtime environment.
- FileUris List<string>
- Files to be placed in the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- JarUris List<string>
- JARs to include on the driver and executor CLASSPATH. For more information about Apache Spark, see Apache Spark.
- MainClass string
- The fully qualified name of a class in jarUris, for example, com.example.wordcount. Exactly one of mainClass and main_jar_uri field should be set for Java/Scala language type.
- MainFileUri string
- The main file/jar URI of the Spark application. Exactly one of the definitionBody field and the mainFileUri field must be set for Python. Exactly one of mainClass and mainFileUri field should be set for Java/Scala language type.
- Properties Dictionary<string, string>
- Configuration properties as a set of key/value pairs, which will be passed on to the Spark application. For more information, see Apache Spark and the procedure option list. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- PyFileUris List<string>
- Python files to be placed on the PYTHONPATH for PySpark application. Supported file types: .py, .egg, and .zip. For more information about Apache Spark, see Apache Spark.
- RuntimeVersion string
- Runtime version. If not specified, the default runtime version is used.
- ArchiveUris []string
- Archive files to be extracted into the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- Connection string
- Fully qualified name of the user-provided Spark connection object. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- ContainerImage string
- Custom container image for the runtime environment.
- FileUris []string
- Files to be placed in the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- JarUris []string
- JARs to include on the driver and executor CLASSPATH. For more information about Apache Spark, see Apache Spark.
- MainClass string
- The fully qualified name of a class in jarUris, for example, com.example.wordcount. Exactly one of mainClass and main_jar_uri field should be set for Java/Scala language type.
- MainFileUri string
- The main file/jar URI of the Spark application. Exactly one of the definitionBody field and the mainFileUri field must be set for Python. Exactly one of mainClass and mainFileUri field should be set for Java/Scala language type.
- Properties map[string]string
- Configuration properties as a set of key/value pairs, which will be passed on to the Spark application. For more information, see Apache Spark and the procedure option list. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- PyFileUris []string
- Python files to be placed on the PYTHONPATH for PySpark application. Supported file types: .py, .egg, and .zip. For more information about Apache Spark, see Apache Spark.
- RuntimeVersion string
- Runtime version. If not specified, the default runtime version is used.
- archiveUris List<String>
- Archive files to be extracted into the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- connection String
- Fully qualified name of the user-provided Spark connection object. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- containerImage String
- Custom container image for the runtime environment.
- fileUris List<String>
- Files to be placed in the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- jarUris List<String>
- JARs to include on the driver and executor CLASSPATH. For more information about Apache Spark, see Apache Spark.
- mainClass String
- The fully qualified name of a class in jarUris, for example, com.example.wordcount. Exactly one of mainClass and main_jar_uri field should be set for Java/Scala language type.
- mainFileUri String
- The main file/jar URI of the Spark application. Exactly one of the definitionBody field and the mainFileUri field must be set for Python. Exactly one of mainClass and mainFileUri field should be set for Java/Scala language type.
- properties Map<String,String>
- Configuration properties as a set of key/value pairs, which will be passed on to the Spark application. For more information, see Apache Spark and the procedure option list. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- pyFileUris List<String>
- Python files to be placed on the PYTHONPATH for PySpark application. Supported file types: .py, .egg, and .zip. For more information about Apache Spark, see Apache Spark.
- runtimeVersion String
- Runtime version. If not specified, the default runtime version is used.
- archiveUris string[]
- Archive files to be extracted into the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- connection string
- Fully qualified name of the user-provided Spark connection object. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- containerImage string
- Custom container image for the runtime environment.
- fileUris string[]
- Files to be placed in the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- jarUris string[]
- JARs to include on the driver and executor CLASSPATH. For more information about Apache Spark, see Apache Spark.
- mainClass string
- The fully qualified name of a class in jarUris, for example, com.example.wordcount. Exactly one of mainClass and main_jar_uri field should be set for Java/Scala language type.
- mainFileUri string
- The main file/jar URI of the Spark application. Exactly one of the definitionBody field and the mainFileUri field must be set for Python. Exactly one of mainClass and mainFileUri field should be set for Java/Scala language type.
- properties {[key: string]: string}
- Configuration properties as a set of key/value pairs, which will be passed on to the Spark application. For more information, see Apache Spark and the procedure option list. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- pyFileUris string[]
- Python files to be placed on the PYTHONPATH for PySpark application. Supported file types: .py, .egg, and .zip. For more information about Apache Spark, see Apache Spark.
- runtimeVersion string
- Runtime version. If not specified, the default runtime version is used.
- archive_uris Sequence[str]
- Archive files to be extracted into the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- connection str
- Fully qualified name of the user-provided Spark connection object. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- container_image str
- Custom container image for the runtime environment.
- file_uris Sequence[str]
- Files to be placed in the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- jar_uris Sequence[str]
- JARs to include on the driver and executor CLASSPATH. For more information about Apache Spark, see Apache Spark.
- main_class str
- The fully qualified name of a class in jarUris, for example, com.example.wordcount. Exactly one of mainClass and main_jar_uri field should be set for Java/Scala language type.
- main_file_uri str
- The main file/jar URI of the Spark application. Exactly one of the definitionBody field and the mainFileUri field must be set for Python. Exactly one of mainClass and mainFileUri field should be set for Java/Scala language type.
- properties Mapping[str, str]
- Configuration properties as a set of key/value pairs, which will be passed on to the Spark application. For more information, see Apache Spark and the procedure option list. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- py_file_uris Sequence[str]
- Python files to be placed on the PYTHONPATH for PySpark application. Supported file types: .py, .egg, and .zip. For more information about Apache Spark, see Apache Spark.
- runtime_version str
- Runtime version. If not specified, the default runtime version is used.
- archiveUris List<String>
- Archive files to be extracted into the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- connection String
- Fully qualified name of the user-provided Spark connection object. Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}"
- containerImage String
- Custom container image for the runtime environment.
- fileUris List<String>
- Files to be placed in the working directory of each executor. For more information about Apache Spark, see Apache Spark.
- jarUris List<String>
- JARs to include on the driver and executor CLASSPATH. For more information about Apache Spark, see Apache Spark.
- mainClass String
- The fully qualified name of a class in jarUris, for example, com.example.wordcount. Exactly one of mainClass and main_jar_uri field should be set for Java/Scala language type.
- mainFileUri String
- The main file/jar URI of the Spark application. Exactly one of the definitionBody field and the mainFileUri field must be set for Python. Exactly one of mainClass and mainFileUri field should be set for Java/Scala language type.
- properties Map<String>
- Configuration properties as a set of key/value pairs, which will be passed on to the Spark application. For more information, see Apache Spark and the procedure option list. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
- pyFileUris List<String>
- Python files to be placed on the PYTHONPATH for PySpark application. Supported file types: .py, .egg, and .zip. For more information about Apache Spark, see Apache Spark.
- runtimeVersion String
- Runtime version. If not specified, the default runtime version is used.
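Spark options are typically used on a PROCEDURE routine whose language is PYTHON, JAVA, or SCALA. The TypeScript sketch below shows one plausible arrangement: a PySpark procedure with its code inlined in definitionBody and attached to a Spark connection. Dataset and connection ids, the runtime version, and the PySpark snippet itself are placeholder values.
import * as gcp from "@pulumi/gcp";

// Dataset that will own the procedure (placeholder id).
const sparkDataset = new gcp.bigquery.Dataset("spark_dataset", {datasetId: "dataset_id"});

// Spark connection used to run the procedure.
const sparkConnection = new gcp.bigquery.Connection("spark_connection", {
    connectionId: "connection_id",
    location: "US",
    spark: {},
});

// PySpark stored procedure; code is inlined rather than referenced via mainFileUri.
const pysparkProc = new gcp.bigquery.Routine("pyspark_proc", {
    datasetId: sparkDataset.datasetId,
    routineId: "pyspark_routine_id",
    routineType: "PROCEDURE",
    language: "PYTHON",
    definitionBody: `
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
spark.sql("SELECT 1").show()
`,
    sparkOptions: {
        connection: sparkConnection.name,
        runtimeVersion: "2.1",
    },
});
For Java or Scala procedures, mainClass together with jarUris, or mainFileUri alone, would replace the inlined body, per the constraints described above.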
Import
Routine can be imported using any of these accepted formats:
- projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}
- {{project}}/{{dataset_id}}/{{routine_id}}
- {{dataset_id}}/{{routine_id}}
When using the pulumi import command, Routine can be imported using one of the formats above. For example:
$ pulumi import gcp:bigquery/routine:Routine default projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}
$ pulumi import gcp:bigquery/routine:Routine default {{project}}/{{dataset_id}}/{{routine_id}}
$ pulumi import gcp:bigquery/routine:Routine default {{dataset_id}}/{{routine_id}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Google Cloud (GCP) Classic pulumi/pulumi-gcp
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the google-beta Terraform Provider.