databricks.SqlEndpoint
Explore with Pulumi AI
This resource is used to manage Databricks SQL warehouses. To create SQL warehouses you must have databricks_sql_access on your databricks.Group or databricks_user.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const me = databricks.getCurrentUser({});
const _this = new databricks.SqlEndpoint("this", {
    name: me.then(me => `Endpoint of ${me.alphanumeric}`),
    clusterSize: "Small",
    maxNumClusters: 1,
    tags: {
        customTags: [{
            key: "City",
            value: "Amsterdam",
        }],
    },
});
import pulumi
import pulumi_databricks as databricks
me = databricks.get_current_user()
this = databricks.SqlEndpoint("this",
    name=f"Endpoint of {me.alphanumeric}",
    cluster_size="Small",
    max_num_clusters=1,
    tags={
        "custom_tags": [{
            "key": "City",
            "value": "Amsterdam",
        }],
    })
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		me, err := databricks.GetCurrentUser(ctx, map[string]interface{}{}, nil)
		if err != nil {
			return err
		}
		_, err = databricks.NewSqlEndpoint(ctx, "this", &databricks.SqlEndpointArgs{
			Name:           pulumi.Sprintf("Endpoint of %v", me.Alphanumeric),
			ClusterSize:    pulumi.String("Small"),
			MaxNumClusters: pulumi.Int(1),
			Tags: &databricks.SqlEndpointTagsArgs{
				CustomTags: databricks.SqlEndpointTagsCustomTagArray{
					&databricks.SqlEndpointTagsCustomTagArgs{
						Key:   pulumi.String("City"),
						Value: pulumi.String("Amsterdam"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var me = Databricks.GetCurrentUser.Invoke();
    var @this = new Databricks.SqlEndpoint("this", new()
    {
        Name = $"Endpoint of {me.Apply(getCurrentUserResult => getCurrentUserResult.Alphanumeric)}",
        ClusterSize = "Small",
        MaxNumClusters = 1,
        Tags = new Databricks.Inputs.SqlEndpointTagsArgs
        {
            CustomTags = new[]
            {
                new Databricks.Inputs.SqlEndpointTagsCustomTagArgs
                {
                    Key = "City",
                    Value = "Amsterdam",
                },
            },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.SqlEndpoint;
import com.pulumi.databricks.SqlEndpointArgs;
import com.pulumi.databricks.inputs.SqlEndpointTagsArgs;
import com.pulumi.databricks.inputs.SqlEndpointTagsCustomTagArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        final var me = DatabricksFunctions.getCurrentUser();
        var this_ = new SqlEndpoint("this", SqlEndpointArgs.builder()
            .name(String.format("Endpoint of %s", me.applyValue(getCurrentUserResult -> getCurrentUserResult.alphanumeric())))
            .clusterSize("Small")
            .maxNumClusters(1)
            .tags(SqlEndpointTagsArgs.builder()
                .customTags(SqlEndpointTagsCustomTagArgs.builder()
                    .key("City")
                    .value("Amsterdam")
                    .build())
                .build())
            .build());
    }
}
resources:
  this:
    type: databricks:SqlEndpoint
    properties:
      name: Endpoint of ${me.alphanumeric}
      clusterSize: Small
      maxNumClusters: 1
      tags:
        customTags:
          - key: City
            value: Amsterdam
variables:
  me:
    fn::invoke:
      function: databricks:getCurrentUser
      arguments: {}
Access control
- databricks.Permissions can control which groups or individual users can Can Use or Can Manage SQL warehouses.
- databricks_sql_access on databricks.Group or databricks_user.
Related resources
The following resources are often used in the same context:
- End to end workspace management guide.
- databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount.
- databricks.SqlDashboard to manage Databricks SQL Dashboards.
- databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and data access properties for all databricks.SqlEndpoint of workspace.
- databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and more.
Create SqlEndpoint Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new SqlEndpoint(name: string, args: SqlEndpointArgs, opts?: CustomResourceOptions);
@overload
def SqlEndpoint(resource_name: str,
                args: SqlEndpointArgs,
                opts: Optional[ResourceOptions] = None)
@overload
def SqlEndpoint(resource_name: str,
                opts: Optional[ResourceOptions] = None,
                cluster_size: Optional[str] = None,
                instance_profile_arn: Optional[str] = None,
                channel: Optional[SqlEndpointChannelArgs] = None,
                data_source_id: Optional[str] = None,
                enable_photon: Optional[bool] = None,
                enable_serverless_compute: Optional[bool] = None,
                auto_stop_mins: Optional[int] = None,
                max_num_clusters: Optional[int] = None,
                min_num_clusters: Optional[int] = None,
                name: Optional[str] = None,
                spot_instance_policy: Optional[str] = None,
                tags: Optional[SqlEndpointTagsArgs] = None,
                warehouse_type: Optional[str] = None)
func NewSqlEndpoint(ctx *Context, name string, args SqlEndpointArgs, opts ...ResourceOption) (*SqlEndpoint, error)
public SqlEndpoint(string name, SqlEndpointArgs args, CustomResourceOptions? opts = null)
public SqlEndpoint(String name, SqlEndpointArgs args)
public SqlEndpoint(String name, SqlEndpointArgs args, CustomResourceOptions options)
type: databricks:SqlEndpoint
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args SqlEndpointArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var sqlEndpointResource = new Databricks.SqlEndpoint("sqlEndpointResource", new()
{
    ClusterSize = "string",
    InstanceProfileArn = "string",
    Channel = new Databricks.Inputs.SqlEndpointChannelArgs
    {
        DbsqlVersion = "string",
        Name = "string",
    },
    DataSourceId = "string",
    EnablePhoton = false,
    EnableServerlessCompute = false,
    AutoStopMins = 0,
    MaxNumClusters = 0,
    MinNumClusters = 0,
    Name = "string",
    SpotInstancePolicy = "string",
    Tags = new Databricks.Inputs.SqlEndpointTagsArgs
    {
        CustomTags = new[]
        {
            new Databricks.Inputs.SqlEndpointTagsCustomTagArgs
            {
                Key = "string",
                Value = "string",
            },
        },
    },
    WarehouseType = "string",
});
example, err := databricks.NewSqlEndpoint(ctx, "sqlEndpointResource", &databricks.SqlEndpointArgs{
	ClusterSize:        pulumi.String("string"),
	InstanceProfileArn: pulumi.String("string"),
	Channel: &databricks.SqlEndpointChannelArgs{
		DbsqlVersion: pulumi.String("string"),
		Name:         pulumi.String("string"),
	},
	DataSourceId:            pulumi.String("string"),
	EnablePhoton:            pulumi.Bool(false),
	EnableServerlessCompute: pulumi.Bool(false),
	AutoStopMins:            pulumi.Int(0),
	MaxNumClusters:          pulumi.Int(0),
	MinNumClusters:          pulumi.Int(0),
	Name:                    pulumi.String("string"),
	SpotInstancePolicy:      pulumi.String("string"),
	Tags: &databricks.SqlEndpointTagsArgs{
		CustomTags: databricks.SqlEndpointTagsCustomTagArray{
			&databricks.SqlEndpointTagsCustomTagArgs{
				Key:   pulumi.String("string"),
				Value: pulumi.String("string"),
			},
		},
	},
	WarehouseType: pulumi.String("string"),
})
var sqlEndpointResource = new SqlEndpoint("sqlEndpointResource", SqlEndpointArgs.builder()
    .clusterSize("string")
    .instanceProfileArn("string")
    .channel(SqlEndpointChannelArgs.builder()
        .dbsqlVersion("string")
        .name("string")
        .build())
    .dataSourceId("string")
    .enablePhoton(false)
    .enableServerlessCompute(false)
    .autoStopMins(0)
    .maxNumClusters(0)
    .minNumClusters(0)
    .name("string")
    .spotInstancePolicy("string")
    .tags(SqlEndpointTagsArgs.builder()
        .customTags(SqlEndpointTagsCustomTagArgs.builder()
            .key("string")
            .value("string")
            .build())
        .build())
    .warehouseType("string")
    .build());
sql_endpoint_resource = databricks.SqlEndpoint("sqlEndpointResource",
    cluster_size="string",
    instance_profile_arn="string",
    channel={
        "dbsql_version": "string",
        "name": "string",
    },
    data_source_id="string",
    enable_photon=False,
    enable_serverless_compute=False,
    auto_stop_mins=0,
    max_num_clusters=0,
    min_num_clusters=0,
    name="string",
    spot_instance_policy="string",
    tags={
        "custom_tags": [{
            "key": "string",
            "value": "string",
        }],
    },
    warehouse_type="string")
const sqlEndpointResource = new databricks.SqlEndpoint("sqlEndpointResource", {
    clusterSize: "string",
    instanceProfileArn: "string",
    channel: {
        dbsqlVersion: "string",
        name: "string",
    },
    dataSourceId: "string",
    enablePhoton: false,
    enableServerlessCompute: false,
    autoStopMins: 0,
    maxNumClusters: 0,
    minNumClusters: 0,
    name: "string",
    spotInstancePolicy: "string",
    tags: {
        customTags: [{
            key: "string",
            value: "string",
        }],
    },
    warehouseType: "string",
});
type: databricks:SqlEndpoint
properties:
    autoStopMins: 0
    channel:
        dbsqlVersion: string
        name: string
    clusterSize: string
    dataSourceId: string
    enablePhoton: false
    enableServerlessCompute: false
    instanceProfileArn: string
    maxNumClusters: 0
    minNumClusters: 0
    name: string
    spotInstancePolicy: string
    tags:
        customTags:
            - key: string
              value: string
    warehouseType: string
SqlEndpoint Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The SqlEndpoint resource accepts the following input properties:
- ClusterSize string
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- AutoStopMins int
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- Channel
SqlEndpoint Channel
- block, consisting of following fields:
- DataSourceId string
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- EnablePhoton bool
- Whether to enable Photon. This field is optional and is enabled by default.
- EnableServerlessCompute bool
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
- For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- InstanceProfileArn string
- MaxNumClusters int
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to 1.
- MinNumClusters int
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- Name string
- Name of the SQL warehouse. Must be unique.
- SpotInstancePolicy string
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- Tags
SqlEndpoint Tags
- Databricks tags all endpoint resources with these tags.
- WarehouseType string
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- ClusterSize string
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- AutoStopMins int
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- Channel
SqlEndpoint Channel Args
- block, consisting of following fields:
- DataSourceId string
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- EnablePhoton bool
- Whether to enable Photon. This field is optional and is enabled by default.
- EnableServerlessCompute bool
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
- For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- InstanceProfileArn string
- MaxNumClusters int
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to 1.
- MinNumClusters int
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- Name string
- Name of the SQL warehouse. Must be unique.
- SpotInstancePolicy string
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- Tags
SqlEndpoint Tags Args
- Databricks tags all endpoint resources with these tags.
- WarehouseType string
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- clusterSize String
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- autoStopMins Integer
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
SqlEndpoint Channel
- block, consisting of following fields:
- dataSourceId String
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enablePhoton Boolean
- Whether to enable Photon. This field is optional and is enabled by default.
- enableServerlessCompute Boolean
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
- For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instanceProfileArn String
- maxNumClusters Integer
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to 1.
- minNumClusters Integer
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- name String
- Name of the SQL warehouse. Must be unique.
- spotInstancePolicy String
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- tags
SqlEndpoint Tags
- Databricks tags all endpoint resources with these tags.
- warehouseType String
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- clusterSize string
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- autoStopMins number
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
SqlEndpoint Channel
- block, consisting of following fields:
- dataSourceId string
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enablePhoton boolean
- Whether to enable Photon. This field is optional and is enabled by default.
- enableServerlessCompute boolean
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
- For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instanceProfileArn string
- maxNumClusters number
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to 1.
- minNumClusters number
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- name string
- Name of the SQL warehouse. Must be unique.
- spotInstancePolicy string
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- tags
SqlEndpoint Tags
- Databricks tags all endpoint resources with these tags.
- warehouseType string
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- cluster_size str
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- auto_stop_mins int
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
SqlEndpoint Channel Args
- block, consisting of following fields:
- data_source_id str
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable_photon bool
- Whether to enable Photon. This field is optional and is enabled by default.
- enable_serverless_compute bool
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
- For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instance_profile_arn str
- max_num_clusters int
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to 1.
- min_num_clusters int
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- name str
- Name of the SQL warehouse. Must be unique.
- spot_instance_policy str
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- tags
SqlEndpoint Tags Args
- Databricks tags all endpoint resources with these tags.
- warehouse_type str
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- clusterSize String
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- autoStopMins Number
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel Property Map
- block, consisting of following fields:
- dataSourceId String
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enablePhoton Boolean
- Whether to enable Photon. This field is optional and is enabled by default.
- enableServerlessCompute Boolean
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.
- For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.
- instanceProfileArn String
- maxNumClusters Number
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this is default to 1.
- minNumClusters Number
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- name String
- Name of the SQL warehouse. Must be unique.
- spotInstancePolicy String
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- tags Property Map
- Databricks tags all endpoint resources with these tags.
- warehouseType String
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
Outputs
All input properties are implicitly available as output properties. Additionally, the SqlEndpoint resource produces the following output properties:
- CreatorName string
- The username of the user who created the endpoint.
- Healths
List<SqlEndpoint Health> 
- Health status of the endpoint.
- Id string
- The provider-assigned unique ID for this managed resource.
- JdbcUrl string
- JDBC connection string.
- NumActiveSessions int
- The current number of active sessions used by the endpoint.
- NumClusters int
- The current number of clusters used by the endpoint.
- OdbcParams SqlEndpoint Odbc Params
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- State string
- The current state of the endpoint.
- CreatorName string
- The username of the user who created the endpoint.
- Healths
[]SqlEndpoint Health 
- Health status of the endpoint.
- Id string
- The provider-assigned unique ID for this managed resource.
- JdbcUrl string
- JDBC connection string.
- NumActiveSessions int
- The current number of active sessions used by the endpoint.
- NumClusters int
- The current number of clusters used by the endpoint.
- OdbcParams SqlEndpoint Odbc Params
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- State string
- The current state of the endpoint.
- creatorName String
- The username of the user who created the endpoint.
- healths
List<SqlEndpoint Health> 
- Health status of the endpoint.
- id String
- The provider-assigned unique ID for this managed resource.
- jdbcUrl String
- JDBC connection string.
- numActiveSessions Integer
- The current number of active sessions used by the endpoint.
- numClusters Integer
- The current number of clusters used by the endpoint.
- odbcParams SqlEndpoint Odbc Params
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- state String
- The current state of the endpoint.
- creatorName string
- The username of the user who created the endpoint.
- healths
SqlEndpoint Health[] 
- Health status of the endpoint.
- id string
- The provider-assigned unique ID for this managed resource.
- jdbcUrl string
- JDBC connection string.
- numActiveSessions number
- The current number of active sessions used by the endpoint.
- numClusters number
- The current number of clusters used by the endpoint.
- odbcParams SqlEndpoint Odbc Params
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- state string
- The current state of the endpoint.
- creator_name str
- The username of the user who created the endpoint.
- healths
Sequence[SqlEndpoint Health] 
- Health status of the endpoint.
- id str
- The provider-assigned unique ID for this managed resource.
- jdbc_url str
- JDBC connection string.
- num_active_sessions int
- The current number of active sessions used by the endpoint.
- num_clusters int
- The current number of clusters used by the endpoint.
- odbc_params SqlEndpoint Odbc Params
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- state str
- The current state of the endpoint.
- creatorName String
- The username of the user who created the endpoint.
- healths List<Property Map>
- Health status of the endpoint.
- id String
- The provider-assigned unique ID for this managed resource.
- jdbcUrl String
- JDBC connection string.
- numActiveSessions Number
- The current number of active sessions used by the endpoint.
- numClusters Number
- The current number of clusters used by the endpoint.
- odbcParams Property Map
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- state String
- The current state of the endpoint.
Look up Existing SqlEndpoint Resource
Get an existing SqlEndpoint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: SqlEndpointState, opts?: CustomResourceOptions): SqlEndpoint
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        auto_stop_mins: Optional[int] = None,
        channel: Optional[SqlEndpointChannelArgs] = None,
        cluster_size: Optional[str] = None,
        creator_name: Optional[str] = None,
        data_source_id: Optional[str] = None,
        enable_photon: Optional[bool] = None,
        enable_serverless_compute: Optional[bool] = None,
        healths: Optional[Sequence[SqlEndpointHealthArgs]] = None,
        instance_profile_arn: Optional[str] = None,
        jdbc_url: Optional[str] = None,
        max_num_clusters: Optional[int] = None,
        min_num_clusters: Optional[int] = None,
        name: Optional[str] = None,
        num_active_sessions: Optional[int] = None,
        num_clusters: Optional[int] = None,
        odbc_params: Optional[SqlEndpointOdbcParamsArgs] = None,
        spot_instance_policy: Optional[str] = None,
        state: Optional[str] = None,
        tags: Optional[SqlEndpointTagsArgs] = None,
        warehouse_type: Optional[str] = None) -> SqlEndpoint
func GetSqlEndpoint(ctx *Context, name string, id IDInput, state *SqlEndpointState, opts ...ResourceOption) (*SqlEndpoint, error)
public static SqlEndpoint Get(string name, Input<string> id, SqlEndpointState? state, CustomResourceOptions? opts = null)
public static SqlEndpoint get(String name, Output<String> id, SqlEndpointState state, CustomResourceOptions options)
resources:
  _:
    type: databricks:SqlEndpoint
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- AutoStopMins int
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- Channel
SqlEndpoint Channel
- block, consisting of following fields:
- ClusterSize string
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- CreatorName string
- The username of the user who created the endpoint.
- DataSourceId string
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- EnablePhoton bool
- Whether to enable Photon. This field is optional and is enabled by default.
- EnableServerlessCompute bool
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly. - For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

- Healths
List<SqlEndpoint Health>
- Health status of the endpoint.
- InstanceProfileArn string
- JdbcUrl string
- JDBC connection string.
- MaxNumClusters int
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
- MinNumClusters int
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- Name string
- Name of the SQL warehouse. Must be unique.
- NumActiveSessions int
- The current number of clusters used by the endpoint.
- NumClusters int
- The current number of clusters used by the endpoint.
- OdbcParams SqlEndpoint Odbc Params
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- SpotInstancePolicy string
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- State string
- The current state of the endpoint.
- Tags
SqlEndpoint Tags
- Databricks tags all endpoint resources with these tags.
- WarehouseType string
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- AutoStopMins int
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- Channel
SqlEndpoint Channel Args
- block, consisting of following fields:
- ClusterSize string
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- CreatorName string
- The username of the user who created the endpoint.
- DataSourceId string
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- EnablePhoton bool
- Whether to enable Photon. This field is optional and is enabled by default.
- EnableServerlessCompute bool
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly. - For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

- Healths
[]SqlEndpoint Health Args
- Health status of the endpoint.
- InstanceProfileArn string
- JdbcUrl string
- JDBC connection string.
- MaxNumClusters int
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
- MinNumClusters int
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- Name string
- Name of the SQL warehouse. Must be unique.
- NumActiveSessions int
- The current number of clusters used by the endpoint.
- NumClusters int
- The current number of clusters used by the endpoint.
- OdbcParams SqlEndpoint Odbc Params Args
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- SpotInstancePolicy string
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- State string
- The current state of the endpoint.
- Tags
SqlEndpoint Tags Args
- Databricks tags all endpoint resources with these tags.
- WarehouseType string
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- autoStopMins Integer
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
SqlEndpoint Channel
- block, consisting of following fields:
- clusterSize String
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- creatorName String
- The username of the user who created the endpoint.
- dataSourceId String
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enablePhoton Boolean
- Whether to enable Photon. This field is optional and is enabled by default.
- enableServerlessCompute Boolean
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly. - For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

- healths
List<SqlEndpoint Health>
- Health status of the endpoint.
- instanceProfileArn String
- jdbcUrl String
- JDBC connection string.
- maxNumClusters Integer
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
- minNumClusters Integer
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- name String
- Name of the SQL warehouse. Must be unique.
- numActiveSessions Integer
- The current number of clusters used by the endpoint.
- numClusters Integer
- The current number of clusters used by the endpoint.
- odbcParams SqlEndpoint Odbc Params
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- spotInstancePolicy String
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- state String
- The current state of the endpoint.
- tags
SqlEndpoint Tags
- Databricks tags all endpoint resources with these tags.
- warehouseType String
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- autoStopMins number
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
SqlEndpoint Channel
- block, consisting of following fields:
- clusterSize string
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- creatorName string
- The username of the user who created the endpoint.
- dataSourceId string
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enablePhoton boolean
- Whether to enable Photon. This field is optional and is enabled by default.
- enableServerlessCompute boolean
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly. - For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

- healths
SqlEndpoint Health[]
- Health status of the endpoint.
- instanceProfileArn string
- jdbcUrl string
- JDBC connection string.
- maxNumClusters number
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
- minNumClusters number
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- name string
- Name of the SQL warehouse. Must be unique.
- numActiveSessions number
- The current number of clusters used by the endpoint.
- numClusters number
- The current number of clusters used by the endpoint.
- odbcParams SqlEndpoint Odbc Params
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- spotInstancePolicy string
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- state string
- The current state of the endpoint.
- tags
SqlEndpoint Tags
- Databricks tags all endpoint resources with these tags.
- warehouseType string
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- auto_stop_mins int
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel
SqlEndpoint Channel Args
- block, consisting of following fields:
- cluster_size str
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- creator_name str
- The username of the user who created the endpoint.
- data_source_id str
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enable_photon bool
- Whether to enable Photon. This field is optional and is enabled by default.
- enable_serverless_compute bool
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly. - For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

- healths
Sequence[SqlEndpoint Health Args]
- Health status of the endpoint.
- instance_profile_arn str
- jdbc_url str
- JDBC connection string.
- max_num_clusters int
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
- min_num_clusters int
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- name str
- Name of the SQL warehouse. Must be unique.
- num_active_sessions int
- The current number of clusters used by the endpoint.
- num_clusters int
- The current number of clusters used by the endpoint.
- odbc_params SqlEndpoint Odbc Params Args
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- spot_instance_policy str
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- state str
- The current state of the endpoint.
- tags
SqlEndpoint Tags Args
- Databricks tags all endpoint resources with these tags.
- warehouse_type str
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
- autoStopMins Number
- Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
- channel Property Map
- block, consisting of following fields:
- clusterSize String
- The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
- creatorName String
- The username of the user who created the endpoint.
- dataSourceId String
- ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
- enablePhoton Boolean
- Whether to enable Photon. This field is optional and is enabled by default.
- enableServerlessCompute Boolean
- Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly. - For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.
- For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which is to default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

- healths List<Property Map>
- Health status of the endpoint.
- instanceProfileArn String
- jdbcUrl String
- JDBC connection string.
- maxNumClusters Number
- Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
- minNumClusters Number
- Minimum number of clusters available when a SQL warehouse is running. The default is 1.
- name String
- Name of the SQL warehouse. Must be unique.
- numActiveSessions Number
- The current number of clusters used by the endpoint.
- numClusters Number
- The current number of clusters used by the endpoint.
- odbcParams Property Map
- ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
- spotInstancePolicy String
- The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
- state String
- The current state of the endpoint.
- tags Property Map
- Databricks tags all endpoint resources with these tags.
- warehouseType String
- SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
Supporting Types
SqlEndpointChannel, SqlEndpointChannelArgs      
- DbsqlVersion string
- Name string
- Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
- DbsqlVersion string
- Name string
- Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
- dbsqlVersion String
- name String
- Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
- dbsqlVersion string
- name string
- Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
- dbsql_version str
- name str
- Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
- dbsqlVersion String
- name String
- Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
SqlEndpointHealth, SqlEndpointHealthArgs      
- Details string
- FailureReason SqlEndpoint Health Failure Reason 
- Message string
- Status string
- Summary string
- Details string
- FailureReason SqlEndpoint Health Failure Reason 
- Message string
- Status string
- Summary string
- details String
- failureReason SqlEndpoint Health Failure Reason 
- message String
- status String
- summary String
- details string
- failureReason SqlEndpoint Health Failure Reason 
- message string
- status string
- summary string
- details String
- failureReason Property Map
- message String
- status String
- summary String
SqlEndpointHealthFailureReason, SqlEndpointHealthFailureReasonArgs          
- Code string
- Parameters Dictionary<string, string>
- Type string
- Code string
- Parameters map[string]string
- Type string
- code String
- parameters Map<String,String>
- type String
- code string
- parameters {[key: string]: string}
- type string
- code str
- parameters Mapping[str, str]
- type str
- code String
- parameters Map<String>
- type String
SqlEndpointOdbcParams, SqlEndpointOdbcParamsArgs        
SqlEndpointTags, SqlEndpointTagsArgs      
SqlEndpointTagsCustomTag, SqlEndpointTagsCustomTagArgs          
Import
You can import a databricks_sql_endpoint resource with ID like the following:
bash
$ pulumi import databricks:index/sqlEndpoint:SqlEndpoint this <endpoint-id>
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the databricks Terraform Provider.