databricks.QualityMonitor
This resource allows you to manage Lakehouse Monitors in Databricks.
A databricks.QualityMonitor is attached to a databricks.SqlTable and can be of type timeseries, snapshot or inference.
Plugin Framework Migration
The quality monitor resource has been migrated from sdkv2 to the plugin framework. If you encounter any problems with this resource and suspect they are caused by the migration, you can fall back to sdkv2 by setting the following environment variable: export USE_SDK_V2_RESOURCES="databricks.QualityMonitor".
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const sandbox = new databricks.Catalog("sandbox", {
    name: "sandbox",
    comment: "this catalog is managed by terraform",
    properties: {
        purpose: "testing",
    },
});
const things = new databricks.Schema("things", {
    catalogName: sandbox.id,
    name: "things",
    comment: "this database is managed by terraform",
    properties: {
        kind: "various",
    },
});
const myTestTable = new databricks.SqlTable("myTestTable", {
    catalogName: "main",
    schemaName: things.name,
    name: "bar",
    tableType: "MANAGED",
    dataSourceFormat: "DELTA",
    columns: [{
        name: "timestamp",
        type: "int",
    }],
});
const testTimeseriesMonitor = new databricks.QualityMonitor("testTimeseriesMonitor", {
    tableName: pulumi.interpolate`${sandbox.name}.${things.name}.${myTestTable.name}`,
    assetsDir: pulumi.interpolate`/Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}`,
    outputSchemaName: pulumi.interpolate`${sandbox.name}.${things.name}`,
    timeSeries: {
        granularities: ["1 hour"],
        timestampCol: "timestamp",
    },
});
import pulumi
import pulumi_databricks as databricks
sandbox = databricks.Catalog("sandbox",
    name="sandbox",
    comment="this catalog is managed by terraform",
    properties={
        "purpose": "testing",
    })
things = databricks.Schema("things",
    catalog_name=sandbox.id,
    name="things",
    comment="this database is managed by terraform",
    properties={
        "kind": "various",
    })
my_test_table = databricks.SqlTable("myTestTable",
    catalog_name="main",
    schema_name=things.name,
    name="bar",
    table_type="MANAGED",
    data_source_format="DELTA",
    columns=[{
        "name": "timestamp",
        "type": "int",
    }])
test_timeseries_monitor = databricks.QualityMonitor("testTimeseriesMonitor",
    table_name=pulumi.Output.all(
        sandboxName=sandbox.name,
        thingsName=things.name,
        myTestTableName=my_test_table.name,
    ).apply(lambda outputs: f"{outputs['sandboxName']}.{outputs['thingsName']}.{outputs['myTestTableName']}"),
    assets_dir=my_test_table.name.apply(lambda name: f"/Shared/provider-test/databricks_quality_monitoring/{name}"),
    output_schema_name=pulumi.Output.all(
        sandboxName=sandbox.name,
        thingsName=things.name,
    ).apply(lambda outputs: f"{outputs['sandboxName']}.{outputs['thingsName']}"),
    time_series={
        "granularities": ["1 hour"],
        "timestamp_col": "timestamp",
    })
package main
import (
	"fmt"
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		sandbox, err := databricks.NewCatalog(ctx, "sandbox", &databricks.CatalogArgs{
			Name:    pulumi.String("sandbox"),
			Comment: pulumi.String("this catalog is managed by terraform"),
			Properties: pulumi.StringMap{
				"purpose": pulumi.String("testing"),
			},
		})
		if err != nil {
			return err
		}
		things, err := databricks.NewSchema(ctx, "things", &databricks.SchemaArgs{
			CatalogName: sandbox.ID(),
			Name:        pulumi.String("things"),
			Comment:     pulumi.String("this database is managed by terraform"),
			Properties: pulumi.StringMap{
				"kind": pulumi.String("various"),
			},
		})
		if err != nil {
			return err
		}
		myTestTable, err := databricks.NewSqlTable(ctx, "myTestTable", &databricks.SqlTableArgs{
			CatalogName:      pulumi.String("main"),
			SchemaName:       things.Name,
			Name:             pulumi.String("bar"),
			TableType:        pulumi.String("MANAGED"),
			DataSourceFormat: pulumi.String("DELTA"),
			Columns: databricks.SqlTableColumnArray{
				&databricks.SqlTableColumnArgs{
					Name: pulumi.String("timestamp"),
					Type: pulumi.String("int"),
				},
			},
		})
		if err != nil {
			return err
		}
		_, err = databricks.NewQualityMonitor(ctx, "testTimeseriesMonitor", &databricks.QualityMonitorArgs{
			TableName: pulumi.All(sandbox.Name, things.Name, myTestTable.Name).ApplyT(func(_args []interface{}) (string, error) {
				sandboxName := _args[0].(string)
				thingsName := _args[1].(string)
				myTestTableName := _args[2].(string)
				return fmt.Sprintf("%v.%v.%v", sandboxName, thingsName, myTestTableName), nil
			}).(pulumi.StringOutput),
			AssetsDir: myTestTable.Name.ApplyT(func(name string) (string, error) {
				return fmt.Sprintf("/Shared/provider-test/databricks_quality_monitoring/%v", name), nil
			}).(pulumi.StringOutput),
			OutputSchemaName: pulumi.All(sandbox.Name, things.Name).ApplyT(func(_args []interface{}) (string, error) {
				sandboxName := _args[0].(string)
				thingsName := _args[1].(string)
				return fmt.Sprintf("%v.%v", sandboxName, thingsName), nil
			}).(pulumi.StringOutput),
			TimeSeries: &databricks.QualityMonitorTimeSeriesArgs{
				Granularities: pulumi.StringArray{
					pulumi.String("1 hour"),
				},
				TimestampCol: pulumi.String("timestamp"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var sandbox = new Databricks.Catalog("sandbox", new()
    {
        Name = "sandbox",
        Comment = "this catalog is managed by terraform",
        Properties = 
        {
            { "purpose", "testing" },
        },
    });
    var things = new Databricks.Schema("things", new()
    {
        CatalogName = sandbox.Id,
        Name = "things",
        Comment = "this database is managed by terraform",
        Properties = 
        {
            { "kind", "various" },
        },
    });
    var myTestTable = new Databricks.SqlTable("myTestTable", new()
    {
        CatalogName = "main",
        SchemaName = things.Name,
        Name = "bar",
        TableType = "MANAGED",
        DataSourceFormat = "DELTA",
        Columns = new[]
        {
            new Databricks.Inputs.SqlTableColumnArgs
            {
                Name = "timestamp",
                Type = "int",
            },
        },
    });
    var testTimeseriesMonitor = new Databricks.QualityMonitor("testTimeseriesMonitor", new()
    {
        TableName = Output.Tuple(sandbox.Name, things.Name, myTestTable.Name).Apply(values =>
        {
            var sandboxName = values.Item1;
            var thingsName = values.Item2;
            var myTestTableName = values.Item3;
            return $"{sandboxName}.{thingsName}.{myTestTableName}";
        }),
        AssetsDir = myTestTable.Name.Apply(name => $"/Shared/provider-test/databricks_quality_monitoring/{name}"),
        OutputSchemaName = Output.Tuple(sandbox.Name, things.Name).Apply(values =>
        {
            var sandboxName = values.Item1;
            var thingsName = values.Item2;
            return $"{sandboxName}.{thingsName}";
        }),
        TimeSeries = new Databricks.Inputs.QualityMonitorTimeSeriesArgs
        {
            Granularities = new[]
            {
                "1 hour",
            },
            TimestampCol = "timestamp",
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.Catalog;
import com.pulumi.databricks.CatalogArgs;
import com.pulumi.databricks.Schema;
import com.pulumi.databricks.SchemaArgs;
import com.pulumi.databricks.SqlTable;
import com.pulumi.databricks.SqlTableArgs;
import com.pulumi.databricks.inputs.SqlTableColumnArgs;
import com.pulumi.databricks.QualityMonitor;
import com.pulumi.databricks.QualityMonitorArgs;
import com.pulumi.databricks.inputs.QualityMonitorTimeSeriesArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var sandbox = new Catalog("sandbox", CatalogArgs.builder()
            .name("sandbox")
            .comment("this catalog is managed by terraform")
            .properties(Map.of("purpose", "testing"))
            .build());
        var things = new Schema("things", SchemaArgs.builder()
            .catalogName(sandbox.id())
            .name("things")
            .comment("this database is managed by terraform")
            .properties(Map.of("kind", "various"))
            .build());
        var myTestTable = new SqlTable("myTestTable", SqlTableArgs.builder()
            .catalogName("main")
            .schemaName(things.name())
            .name("bar")
            .tableType("MANAGED")
            .dataSourceFormat("DELTA")
            .columns(SqlTableColumnArgs.builder()
                .name("timestamp")
                .type("int")
                .build())
            .build());
        var testTimeseriesMonitor = new QualityMonitor("testTimeseriesMonitor", QualityMonitorArgs.builder()
            .tableName(Output.tuple(sandbox.name(), things.name(), myTestTable.name()).applyValue(values -> {
                var sandboxName = values.t1;
                var thingsName = values.t2;
                var myTestTableName = values.t3;
                return String.format("%s.%s.%s", sandboxName,thingsName,myTestTableName);
            }))
            .assetsDir(myTestTable.name().applyValue(name -> String.format("/Shared/provider-test/databricks_quality_monitoring/%s", name)))
            .outputSchemaName(Output.tuple(sandbox.name(), things.name()).applyValue(values -> {
                var sandboxName = values.t1;
                var thingsName = values.t2;
                return String.format("%s.%s", sandboxName,thingsName);
            }))
            .timeSeries(QualityMonitorTimeSeriesArgs.builder()
                .granularities("1 hour")
                .timestampCol("timestamp")
                .build())
            .build());
    }
}
resources:
  sandbox:
    type: databricks:Catalog
    properties:
      name: sandbox
      comment: this catalog is managed by terraform
      properties:
        purpose: testing
  things:
    type: databricks:Schema
    properties:
      catalogName: ${sandbox.id}
      name: things
      comment: this database is managed by terraform
      properties:
        kind: various
  myTestTable:
    type: databricks:SqlTable
    properties:
      catalogName: ${sandbox.name}
      schemaName: ${things.name}
      name: bar
      tableType: MANAGED
      dataSourceFormat: DELTA
      columns:
        - name: timestamp
          type: int
  testTimeseriesMonitor:
    type: databricks:QualityMonitor
    properties:
      tableName: ${sandbox.name}.${things.name}.${myTestTable.name}
      assetsDir: /Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}
      outputSchemaName: ${sandbox.name}.${things.name}
      timeSeries:
        granularities:
          - 1 hour
        timestampCol: timestamp
Inference Monitor
The following examples reuse the sandbox, things, and myTestTable resources defined in the example above.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const testMonitorInference = new databricks.QualityMonitor("testMonitorInference", {
    tableName: `${sandbox.name}.${things.name}.${myTestTable.name}`,
    assetsDir: `/Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}`,
    outputSchemaName: `${sandbox.name}.${things.name}`,
    inferenceLog: {
        granularities: ["1 hour"],
        timestampCol: "timestamp",
        predictionCol: "prediction",
        modelIdCol: "model_id",
        problemType: "PROBLEM_TYPE_REGRESSION",
    },
});
import pulumi
import pulumi_databricks as databricks
test_monitor_inference = databricks.QualityMonitor("testMonitorInference",
    table_name=f"{sandbox['name']}.{things['name']}.{my_test_table['name']}",
    assets_dir=f"/Shared/provider-test/databricks_quality_monitoring/{my_test_table['name']}",
    output_schema_name=f"{sandbox['name']}.{things['name']}",
    inference_log={
        "granularities": ["1 hour"],
        "timestamp_col": "timestamp",
        "prediction_col": "prediction",
        "model_id_col": "model_id",
        "problem_type": "PROBLEM_TYPE_REGRESSION",
    })
package main
import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewQualityMonitor(ctx, "testMonitorInference", &databricks.QualityMonitorArgs{
			TableName:        pulumi.Sprintf("%v.%v.%v", sandbox.Name, things.Name, myTestTable.Name),
			AssetsDir:        pulumi.Sprintf("/Shared/provider-test/databricks_quality_monitoring/%v", myTestTable.Name),
			OutputSchemaName: pulumi.Sprintf("%v.%v", sandbox.Name, things.Name),
			InferenceLog: &databricks.QualityMonitorInferenceLogArgs{
				Granularities: pulumi.StringArray{
					pulumi.String("1 hour"),
				},
				TimestampCol:  pulumi.String("timestamp"),
				PredictionCol: pulumi.String("prediction"),
				ModelIdCol:    pulumi.String("model_id"),
				ProblemType:   pulumi.String("PROBLEM_TYPE_REGRESSION"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var testMonitorInference = new Databricks.QualityMonitor("testMonitorInference", new()
    {
        TableName = $"{sandbox.Name}.{things.Name}.{myTestTable.Name}",
        AssetsDir = $"/Shared/provider-test/databricks_quality_monitoring/{myTestTable.Name}",
        OutputSchemaName = $"{sandbox.Name}.{things.Name}",
        InferenceLog = new Databricks.Inputs.QualityMonitorInferenceLogArgs
        {
            Granularities = new[]
            {
                "1 hour",
            },
            TimestampCol = "timestamp",
            PredictionCol = "prediction",
            ModelIdCol = "model_id",
            ProblemType = "PROBLEM_TYPE_REGRESSION",
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.QualityMonitor;
import com.pulumi.databricks.QualityMonitorArgs;
import com.pulumi.databricks.inputs.QualityMonitorInferenceLogArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var testMonitorInference = new QualityMonitor("testMonitorInference", QualityMonitorArgs.builder()
            .tableName(String.format("%s.%s.%s", sandbox.name(),things.name(),myTestTable.name()))
            .assetsDir(String.format("/Shared/provider-test/databricks_quality_monitoring/%s", myTestTable.name()))
            .outputSchemaName(String.format("%s.%s", sandbox.name(),things.name()))
            .inferenceLog(QualityMonitorInferenceLogArgs.builder()
                .granularities("1 hour")
                .timestampCol("timestamp")
                .predictionCol("prediction")
                .modelIdCol("model_id")
                .problemType("PROBLEM_TYPE_REGRESSION")
                .build())
            .build());
    }
}
resources:
  testMonitorInference:
    type: databricks:QualityMonitor
    properties:
      tableName: ${sandbox.name}.${things.name}.${myTestTable.name}
      assetsDir: /Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}
      outputSchemaName: ${sandbox.name}.${things.name}
      inferenceLog:
        granularities:
          - 1 hour
        timestampCol: timestamp
        predictionCol: prediction
        modelIdCol: model_id
        problemType: PROBLEM_TYPE_REGRESSION
Snapshot Monitor
As above, these examples reuse the resources defined in the first example.
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";
const testMonitorSnapshot = new databricks.QualityMonitor("testMonitorSnapshot", {
    tableName: `${sandbox.name}.${things.name}.${myTestTable.name}`,
    assetsDir: `/Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}`,
    outputSchemaName: `${sandbox.name}.${things.name}`,
    snapshot: {},
});
import pulumi
import pulumi_databricks as databricks
test_monitor_snapshot = databricks.QualityMonitor("testMonitorSnapshot",
    table_name=f"{sandbox['name']}.{things['name']}.{my_test_table['name']}",
    assets_dir=f"/Shared/provider-test/databricks_quality_monitoring/{my_test_table['name']}",
    output_schema_name=f"{sandbox['name']}.{things['name']}",
    snapshot={})
package main
import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewQualityMonitor(ctx, "testMonitorSnapshot", &databricks.QualityMonitorArgs{
			TableName:        pulumi.Sprintf("%v.%v.%v", sandbox.Name, things.Name, myTestTable.Name),
			AssetsDir:        pulumi.Sprintf("/Shared/provider-test/databricks_quality_monitoring/%v", myTestTable.Name),
			OutputSchemaName: pulumi.Sprintf("%v.%v", sandbox.Name, things.Name),
			Snapshot:         &databricks.QualityMonitorSnapshotArgs{},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;
return await Deployment.RunAsync(() => 
{
    var testMonitorSnapshot = new Databricks.QualityMonitor("testMonitorSnapshot", new()
    {
        TableName = $"{sandbox.Name}.{things.Name}.{myTestTable.Name}",
        AssetsDir = $"/Shared/provider-test/databricks_quality_monitoring/{myTestTable.Name}",
        OutputSchemaName = $"{sandbox.Name}.{things.Name}",
        Snapshot = new Databricks.Inputs.QualityMonitorSnapshotArgs(),
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.QualityMonitor;
import com.pulumi.databricks.QualityMonitorArgs;
import com.pulumi.databricks.inputs.QualityMonitorSnapshotArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    public static void stack(Context ctx) {
        var testMonitorSnapshot = new QualityMonitor("testMonitorSnapshot", QualityMonitorArgs.builder()
            .tableName(String.format("%s.%s.%s", sandbox.name(),things.name(),myTestTable.name()))
            .assetsDir(String.format("/Shared/provider-test/databricks_quality_monitoring/%s", myTestTable.name()))
            .outputSchemaName(String.format("%s.%s", sandbox.name(),things.name()))
            .snapshot(QualityMonitorSnapshotArgs.builder().build())
            .build());
    }
}
resources:
  testMonitorSnapshot:
    type: databricks:QualityMonitor
    properties:
      tableName: ${sandbox.name}.${things.name}.${myTestTable.name}
      assetsDir: /Shared/provider-test/databricks_quality_monitoring/${myTestTable.name}
      outputSchemaName: ${sandbox.name}.${things.name}
      snapshot: {}
Related Resources
The following resources are often used in the same context:
- databricks.Catalog
- databricks.Schema
- databricks.SqlTable
Create QualityMonitor Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new QualityMonitor(name: string, args: QualityMonitorArgs, opts?: CustomResourceOptions);

@overload
def QualityMonitor(resource_name: str,
                   args: QualityMonitorArgs,
                   opts: Optional[ResourceOptions] = None)
@overload
def QualityMonitor(resource_name: str,
                   opts: Optional[ResourceOptions] = None,
                   assets_dir: Optional[str] = None,
                   table_name: Optional[str] = None,
                   output_schema_name: Optional[str] = None,
                   inference_log: Optional[QualityMonitorInferenceLogArgs] = None,
                   data_classification_config: Optional[QualityMonitorDataClassificationConfigArgs] = None,
                   latest_monitor_failure_msg: Optional[str] = None,
                   monitor_id: Optional[str] = None,
                   notifications: Optional[QualityMonitorNotificationsArgs] = None,
                   custom_metrics: Optional[Sequence[QualityMonitorCustomMetricArgs]] = None,
                   schedule: Optional[QualityMonitorScheduleArgs] = None,
                   skip_builtin_dashboard: Optional[bool] = None,
                   slicing_exprs: Optional[Sequence[str]] = None,
                   snapshot: Optional[QualityMonitorSnapshotArgs] = None,
                   baseline_table_name: Optional[str] = None,
                   time_series: Optional[QualityMonitorTimeSeriesArgs] = None,
                   warehouse_id: Optional[str] = None)

func NewQualityMonitor(ctx *Context, name string, args QualityMonitorArgs, opts ...ResourceOption) (*QualityMonitor, error)

public QualityMonitor(string name, QualityMonitorArgs args, CustomResourceOptions? opts = null)
public QualityMonitor(String name, QualityMonitorArgs args)
public QualityMonitor(String name, QualityMonitorArgs args, CustomResourceOptions options)
type: databricks:QualityMonitor
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args QualityMonitorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args QualityMonitorArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args QualityMonitorArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args QualityMonitorArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args QualityMonitorArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var qualityMonitorResource = new Databricks.QualityMonitor("qualityMonitorResource", new()
{
    AssetsDir = "string",
    TableName = "string",
    OutputSchemaName = "string",
    InferenceLog = new Databricks.Inputs.QualityMonitorInferenceLogArgs
    {
        Granularities = new[]
        {
            "string",
        },
        ModelIdCol = "string",
        PredictionCol = "string",
        ProblemType = "string",
        TimestampCol = "string",
        LabelCol = "string",
        PredictionProbaCol = "string",
    },
    DataClassificationConfig = new Databricks.Inputs.QualityMonitorDataClassificationConfigArgs
    {
        Enabled = false,
    },
    LatestMonitorFailureMsg = "string",
    MonitorId = "string",
    Notifications = new Databricks.Inputs.QualityMonitorNotificationsArgs
    {
        OnFailure = new Databricks.Inputs.QualityMonitorNotificationsOnFailureArgs
        {
            EmailAddresses = new[]
            {
                "string",
            },
        },
        OnNewClassificationTagDetected = new Databricks.Inputs.QualityMonitorNotificationsOnNewClassificationTagDetectedArgs
        {
            EmailAddresses = new[]
            {
                "string",
            },
        },
    },
    CustomMetrics = new[]
    {
        new Databricks.Inputs.QualityMonitorCustomMetricArgs
        {
            Definition = "string",
            InputColumns = new[]
            {
                "string",
            },
            Name = "string",
            OutputDataType = "string",
            Type = "string",
        },
    },
    Schedule = new Databricks.Inputs.QualityMonitorScheduleArgs
    {
        QuartzCronExpression = "string",
        TimezoneId = "string",
        PauseStatus = "string",
    },
    SkipBuiltinDashboard = false,
    SlicingExprs = new[]
    {
        "string",
    },
    Snapshot = new Databricks.Inputs.QualityMonitorSnapshotArgs(),
    BaselineTableName = "string",
    TimeSeries = new Databricks.Inputs.QualityMonitorTimeSeriesArgs
    {
        Granularities = new[]
        {
            "string",
        },
        TimestampCol = "string",
    },
    WarehouseId = "string",
});
example, err := databricks.NewQualityMonitor(ctx, "qualityMonitorResource", &databricks.QualityMonitorArgs{
	AssetsDir:        pulumi.String("string"),
	TableName:        pulumi.String("string"),
	OutputSchemaName: pulumi.String("string"),
	InferenceLog: &databricks.QualityMonitorInferenceLogArgs{
		Granularities: pulumi.StringArray{
			pulumi.String("string"),
		},
		ModelIdCol:         pulumi.String("string"),
		PredictionCol:      pulumi.String("string"),
		ProblemType:        pulumi.String("string"),
		TimestampCol:       pulumi.String("string"),
		LabelCol:           pulumi.String("string"),
		PredictionProbaCol: pulumi.String("string"),
	},
	DataClassificationConfig: &databricks.QualityMonitorDataClassificationConfigArgs{
		Enabled: pulumi.Bool(false),
	},
	LatestMonitorFailureMsg: pulumi.String("string"),
	MonitorId:               pulumi.String("string"),
	Notifications: &databricks.QualityMonitorNotificationsArgs{
		OnFailure: &databricks.QualityMonitorNotificationsOnFailureArgs{
			EmailAddresses: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
		OnNewClassificationTagDetected: &databricks.QualityMonitorNotificationsOnNewClassificationTagDetectedArgs{
			EmailAddresses: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
	},
	CustomMetrics: databricks.QualityMonitorCustomMetricArray{
		&databricks.QualityMonitorCustomMetricArgs{
			Definition: pulumi.String("string"),
			InputColumns: pulumi.StringArray{
				pulumi.String("string"),
			},
			Name:           pulumi.String("string"),
			OutputDataType: pulumi.String("string"),
			Type:           pulumi.String("string"),
		},
	},
	Schedule: &databricks.QualityMonitorScheduleArgs{
		QuartzCronExpression: pulumi.String("string"),
		TimezoneId:           pulumi.String("string"),
		PauseStatus:          pulumi.String("string"),
	},
	SkipBuiltinDashboard: pulumi.Bool(false),
	SlicingExprs: pulumi.StringArray{
		pulumi.String("string"),
	},
	Snapshot:          &databricks.QualityMonitorSnapshotArgs{},
	BaselineTableName: pulumi.String("string"),
	TimeSeries: &databricks.QualityMonitorTimeSeriesArgs{
		Granularities: pulumi.StringArray{
			pulumi.String("string"),
		},
		TimestampCol: pulumi.String("string"),
	},
	WarehouseId: pulumi.String("string"),
})
var qualityMonitorResource = new QualityMonitor("qualityMonitorResource", QualityMonitorArgs.builder()
    .assetsDir("string")
    .tableName("string")
    .outputSchemaName("string")
    .inferenceLog(QualityMonitorInferenceLogArgs.builder()
        .granularities("string")
        .modelIdCol("string")
        .predictionCol("string")
        .problemType("string")
        .timestampCol("string")
        .labelCol("string")
        .predictionProbaCol("string")
        .build())
    .dataClassificationConfig(QualityMonitorDataClassificationConfigArgs.builder()
        .enabled(false)
        .build())
    .latestMonitorFailureMsg("string")
    .monitorId("string")
    .notifications(QualityMonitorNotificationsArgs.builder()
        .onFailure(QualityMonitorNotificationsOnFailureArgs.builder()
            .emailAddresses("string")
            .build())
        .onNewClassificationTagDetected(QualityMonitorNotificationsOnNewClassificationTagDetectedArgs.builder()
            .emailAddresses("string")
            .build())
        .build())
    .customMetrics(QualityMonitorCustomMetricArgs.builder()
        .definition("string")
        .inputColumns("string")
        .name("string")
        .outputDataType("string")
        .type("string")
        .build())
    .schedule(QualityMonitorScheduleArgs.builder()
        .quartzCronExpression("string")
        .timezoneId("string")
        .pauseStatus("string")
        .build())
    .skipBuiltinDashboard(false)
    .slicingExprs("string")
    .snapshot(QualityMonitorSnapshotArgs.builder().build())
    .baselineTableName("string")
    .timeSeries(QualityMonitorTimeSeriesArgs.builder()
        .granularities("string")
        .timestampCol("string")
        .build())
    .warehouseId("string")
    .build());
quality_monitor_resource = databricks.QualityMonitor("qualityMonitorResource",
    assets_dir="string",
    table_name="string",
    output_schema_name="string",
    inference_log={
        "granularities": ["string"],
        "model_id_col": "string",
        "prediction_col": "string",
        "problem_type": "string",
        "timestamp_col": "string",
        "label_col": "string",
        "prediction_proba_col": "string",
    },
    data_classification_config={
        "enabled": False,
    },
    latest_monitor_failure_msg="string",
    monitor_id="string",
    notifications={
        "on_failure": {
            "email_addresses": ["string"],
        },
        "on_new_classification_tag_detected": {
            "email_addresses": ["string"],
        },
    },
    custom_metrics=[{
        "definition": "string",
        "input_columns": ["string"],
        "name": "string",
        "output_data_type": "string",
        "type": "string",
    }],
    schedule={
        "quartz_cron_expression": "string",
        "timezone_id": "string",
        "pause_status": "string",
    },
    skip_builtin_dashboard=False,
    slicing_exprs=["string"],
    snapshot={},
    baseline_table_name="string",
    time_series={
        "granularities": ["string"],
        "timestamp_col": "string",
    },
    warehouse_id="string")
const qualityMonitorResource = new databricks.QualityMonitor("qualityMonitorResource", {
    assetsDir: "string",
    tableName: "string",
    outputSchemaName: "string",
    inferenceLog: {
        granularities: ["string"],
        modelIdCol: "string",
        predictionCol: "string",
        problemType: "string",
        timestampCol: "string",
        labelCol: "string",
        predictionProbaCol: "string",
    },
    dataClassificationConfig: {
        enabled: false,
    },
    latestMonitorFailureMsg: "string",
    monitorId: "string",
    notifications: {
        onFailure: {
            emailAddresses: ["string"],
        },
        onNewClassificationTagDetected: {
            emailAddresses: ["string"],
        },
    },
    customMetrics: [{
        definition: "string",
        inputColumns: ["string"],
        name: "string",
        outputDataType: "string",
        type: "string",
    }],
    schedule: {
        quartzCronExpression: "string",
        timezoneId: "string",
        pauseStatus: "string",
    },
    skipBuiltinDashboard: false,
    slicingExprs: ["string"],
    snapshot: {},
    baselineTableName: "string",
    timeSeries: {
        granularities: ["string"],
        timestampCol: "string",
    },
    warehouseId: "string",
});
type: databricks:QualityMonitor
properties:
    assetsDir: string
    baselineTableName: string
    customMetrics:
        - definition: string
          inputColumns:
            - string
          name: string
          outputDataType: string
          type: string
    dataClassificationConfig:
        enabled: false
    inferenceLog:
        granularities:
            - string
        labelCol: string
        modelIdCol: string
        predictionCol: string
        predictionProbaCol: string
        problemType: string
        timestampCol: string
    latestMonitorFailureMsg: string
    monitorId: string
    notifications:
        onFailure:
            emailAddresses:
                - string
        onNewClassificationTagDetected:
            emailAddresses:
                - string
    outputSchemaName: string
    schedule:
        pauseStatus: string
        quartzCronExpression: string
        timezoneId: string
    skipBuiltinDashboard: false
    slicingExprs:
        - string
    snapshot: {}
    tableName: string
    timeSeries:
        granularities:
            - string
        timestampCol: string
    warehouseId: string
QualityMonitor Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
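For instance, here is a minimal sketch of the two equivalent forms for the time_series block, reusing the column names from the examples above:

import pulumi_databricks as databricks

# As an argument class:
time_series = databricks.QualityMonitorTimeSeriesArgs(
    granularities=["1 hour"],
    timestamp_col="timestamp",
)

# As an equivalent dictionary literal:
time_series = {
    "granularities": ["1 hour"],
    "timestamp_col": "timestamp",
}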
The QualityMonitor resource accepts the following input properties:
- AssetsDir string
- The directory to store the monitoring assets (e.g. dashboard and metric tables)
- OutputSchemaName string
- Schema where output metric tables are created
- TableName string
- The full name of the table to attach the monitor to, in the format {catalog}.{schema}.{tableName}
- BaselineTableName string
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- CustomMetrics List<QualityMonitorCustomMetric>
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- DataClassificationConfig QualityMonitorDataClassificationConfig
- The data classification config for the monitor
- InferenceLog QualityMonitorInferenceLog
- Configuration for the inference log monitor
- LatestMonitorFailureMsg string
- MonitorId string
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}
- Notifications QualityMonitorNotifications
- The notification settings for the monitor. The optional on_failure and on_new_classification_tag_detected blocks are supported, each consisting of a single string array field named email_addresses containing a list of emails to notify.
- Schedule QualityMonitorSchedule
- The schedule for automatically updating and refreshing metric tables. This block consists of the fields quartz_cron_expression, timezone_id, and pause_status.
- SkipBuiltinDashboard bool
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- SlicingExprs List<string>
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- Snapshot QualityMonitorSnapshot
- Configuration for monitoring snapshot tables.
- TimeSeries QualityMonitorTimeSeries
- Configuration for monitoring timeseries tables.
- WarehouseId string
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
- AssetsDir string
- The directory to store the monitoring assets (e.g. dashboard and metric tables)
- OutputSchemaName string
- Schema where output metric tables are created
- TableName string
- The full name of the table to attach the monitor to, in the format {catalog}.{schema}.{tableName}
- BaselineTableName string
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- CustomMetrics []QualityMonitorCustomMetricArgs
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- DataClassificationConfig QualityMonitorDataClassificationConfigArgs
- The data classification config for the monitor
- InferenceLog QualityMonitorInferenceLogArgs
- Configuration for the inference log monitor
- LatestMonitorFailureMsg string
- MonitorId string
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}
- Notifications QualityMonitorNotificationsArgs
- The notification settings for the monitor. The optional on_failure and on_new_classification_tag_detected blocks are supported, each consisting of a single string array field named email_addresses containing a list of emails to notify.
- Schedule QualityMonitorScheduleArgs
- The schedule for automatically updating and refreshing metric tables. This block consists of the fields quartz_cron_expression, timezone_id, and pause_status.
- SkipBuiltinDashboard bool
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- SlicingExprs []string
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- Snapshot QualityMonitorSnapshotArgs
- Configuration for monitoring snapshot tables.
- TimeSeries QualityMonitorTimeSeriesArgs
- Configuration for monitoring timeseries tables.
- WarehouseId string
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
- assetsDir String
- The directory to store the monitoring assets (e.g. dashboard and metric tables)
- outputSchemaName String
- Schema where output metric tables are created
- tableName String
- The full name of the table to attach the monitor to, in the format {catalog}.{schema}.{tableName}
- baselineTableName String
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- customMetrics List<QualityMonitorCustomMetric>
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- dataClassificationConfig QualityMonitorDataClassificationConfig
- The data classification config for the monitor
- inferenceLog QualityMonitorInferenceLog
- Configuration for the inference log monitor
- latestMonitorFailureMsg String
- monitorId String
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}
- notifications QualityMonitorNotifications
- The notification settings for the monitor. The optional on_failure and on_new_classification_tag_detected blocks are supported, each consisting of a single string array field named email_addresses containing a list of emails to notify.
- schedule QualityMonitorSchedule
- The schedule for automatically updating and refreshing metric tables. This block consists of the fields quartz_cron_expression, timezone_id, and pause_status.
- skipBuiltinDashboard Boolean
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- slicingExprs List<String>
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- snapshot QualityMonitorSnapshot
- Configuration for monitoring snapshot tables.
- timeSeries QualityMonitorTimeSeries
- Configuration for monitoring timeseries tables.
- warehouseId String
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
- assetsDir string
- The directory to store the monitoring assets (e.g. dashboard and metric tables)
- outputSchemaName string
- Schema where output metric tables are created
- tableName string
- The full name of the table to attach the monitor to, in the format {catalog}.{schema}.{tableName}
- baselineTableName string
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- customMetrics QualityMonitorCustomMetric[]
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- dataClassificationConfig QualityMonitorDataClassificationConfig
- The data classification config for the monitor
- inferenceLog QualityMonitorInferenceLog
- Configuration for the inference log monitor
- latestMonitorFailureMsg string
- monitorId string
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}
- notifications QualityMonitorNotifications
- The notification settings for the monitor. The optional on_failure and on_new_classification_tag_detected blocks are supported, each consisting of a single string array field named email_addresses containing a list of emails to notify.
- schedule QualityMonitorSchedule
- The schedule for automatically updating and refreshing metric tables. This block consists of the fields quartz_cron_expression, timezone_id, and pause_status.
- skipBuiltinDashboard boolean
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- slicingExprs string[]
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- snapshot QualityMonitorSnapshot
- Configuration for monitoring snapshot tables.
- timeSeries QualityMonitorTimeSeries
- Configuration for monitoring timeseries tables.
- warehouseId string
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
- assets_dir str
- The directory to store the monitoring assets (e.g. dashboard and metric tables)
- output_schema_name str
- Schema where output metric tables are created
- table_name str
- The full name of the table to attach the monitor to, in the format {catalog}.{schema}.{tableName}
- baseline_table_name str
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- custom_metrics Sequence[QualityMonitorCustomMetricArgs]
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- data_classification_config QualityMonitorDataClassificationConfigArgs
- The data classification config for the monitor
- inference_log QualityMonitorInferenceLogArgs
- Configuration for the inference log monitor
- latest_monitor_failure_msg str
- monitor_id str
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}
- notifications QualityMonitorNotificationsArgs
- The notification settings for the monitor. The optional on_failure and on_new_classification_tag_detected blocks are supported, each consisting of a single string array field named email_addresses containing a list of emails to notify.
- schedule QualityMonitorScheduleArgs
- The schedule for automatically updating and refreshing metric tables. This block consists of the fields quartz_cron_expression, timezone_id, and pause_status.
- skip_builtin_dashboard bool
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- slicing_exprs Sequence[str]
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- snapshot QualityMonitorSnapshotArgs
- Configuration for monitoring snapshot tables.
- time_series QualityMonitorTimeSeriesArgs
- Configuration for monitoring timeseries tables.
- warehouse_id str
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
- assetsDir String
- The directory to store the monitoring assets (e.g. dashboard and metric tables)
- outputSchemaName String
- Schema where output metric tables are created
- tableName String
- The full name of the table to attach the monitor to, in the format {catalog}.{schema}.{tableName}
- baselineTableName String
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- customMetrics List<Property Map>
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- dataClassificationConfig Property Map
- The data classification config for the monitor
- inferenceLog Property Map
- Configuration for the inference log monitor
- latestMonitorFailureMsg String
- monitorId String
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}
- notifications Property Map
- The notification settings for the monitor. The optional on_failure and on_new_classification_tag_detected blocks are supported, each consisting of a single string array field named email_addresses containing a list of emails to notify.
- schedule Property Map
- The schedule for automatically updating and refreshing metric tables. This block consists of the fields quartz_cron_expression, timezone_id, and pause_status.
- skipBuiltinDashboard Boolean
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- slicingExprs List<String>
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- snapshot Property Map
- Configuration for monitoring snapshot tables.
- timeSeries Property Map
- Configuration for monitoring timeseries tables.
- warehouseId String
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
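As a rough sketch of how several of the optional inputs above can combine on one monitor; the table name, schedule, email address, and slicing column below are hypothetical placeholders:

import pulumi_databricks as databricks

monitor = databricks.QualityMonitor("exampleMonitor",
    table_name="main.sales.orders",  # hypothetical {catalog}.{schema}.{table}
    assets_dir="/Shared/quality_monitoring/orders",
    output_schema_name="main.sales",
    snapshot={},
    # Refresh the metric tables daily at noon UTC.
    schedule={
        "quartz_cron_expression": "0 0 12 * * ?",
        "timezone_id": "UTC",
    },
    # Notify these addresses when a refresh fails.
    notifications={
        "on_failure": {
            "email_addresses": ["ops@example.com"],
        },
    },
    # Slice the computed metrics by a hypothetical column.
    slicing_exprs=["region"])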
Outputs
All input properties are implicitly available as output properties. Additionally, the QualityMonitor resource produces the following output properties:
- DashboardId string
- The ID of the generated dashboard.
- DriftMetricsTableName string
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- Id string
- The provider-assigned unique ID for this managed resource.
- MonitorVersion string
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- ProfileMetricsTableName string
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- Status string
- Status of the monitor
- DashboardId string
- The ID of the generated dashboard.
- DriftMetricsTableName string
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- Id string
- The provider-assigned unique ID for this managed resource.
- MonitorVersion string
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- ProfileMetricsTableName string
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- Status string
- Status of the monitor
- dashboardId String
- The ID of the generated dashboard.
- driftMetricsTableName String
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- id String
- The provider-assigned unique ID for this managed resource.
- monitorVersion String
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- profileMetricsTableName String
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- status String
- Status of the monitor
- dashboardId string
- The ID of the generated dashboard.
- driftMetricsTableName string
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- id string
- The provider-assigned unique ID for this managed resource.
- monitorVersion string
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- profileMetricsTableName string
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- status string
- Status of the monitor
- dashboard_id str
- The ID of the generated dashboard.
- drift_metrics_table_name str
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- id str
- The provider-assigned unique ID for this managed resource.
- monitor_version str
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- profile_metrics_table_name str
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- status str
- Status of the monitor
- dashboardId String
- The ID of the generated dashboard.
- driftMetricsTableName String
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- id String
- The provider-assigned unique ID for this managed resource.
- monitorVersion String
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- profileMetricsTableName String
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- status String
- Status of the monitor
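Assuming the hypothetical monitor from the sketch above, the computed properties can be read like any other Pulumi outputs:

import pulumi

pulumi.export("dashboard_id", monitor.dashboard_id)
pulumi.export("drift_metrics_table", monitor.drift_metrics_table_name)
pulumi.export("profile_metrics_table", monitor.profile_metrics_table_name)
pulumi.export("monitor_status", monitor.status)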
Look up Existing QualityMonitor Resource
Get an existing QualityMonitor resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: QualityMonitorState, opts?: CustomResourceOptions): QualityMonitor

@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        assets_dir: Optional[str] = None,
        baseline_table_name: Optional[str] = None,
        custom_metrics: Optional[Sequence[QualityMonitorCustomMetricArgs]] = None,
        dashboard_id: Optional[str] = None,
        data_classification_config: Optional[QualityMonitorDataClassificationConfigArgs] = None,
        drift_metrics_table_name: Optional[str] = None,
        inference_log: Optional[QualityMonitorInferenceLogArgs] = None,
        latest_monitor_failure_msg: Optional[str] = None,
        monitor_id: Optional[str] = None,
        monitor_version: Optional[str] = None,
        notifications: Optional[QualityMonitorNotificationsArgs] = None,
        output_schema_name: Optional[str] = None,
        profile_metrics_table_name: Optional[str] = None,
        schedule: Optional[QualityMonitorScheduleArgs] = None,
        skip_builtin_dashboard: Optional[bool] = None,
        slicing_exprs: Optional[Sequence[str]] = None,
        snapshot: Optional[QualityMonitorSnapshotArgs] = None,
        status: Optional[str] = None,
        table_name: Optional[str] = None,
        time_series: Optional[QualityMonitorTimeSeriesArgs] = None,
        warehouse_id: Optional[str] = None) -> QualityMonitor

func GetQualityMonitor(ctx *Context, name string, id IDInput, state *QualityMonitorState, opts ...ResourceOption) (*QualityMonitor, error)

public static QualityMonitor Get(string name, Input<string> id, QualityMonitorState? state, CustomResourceOptions? opts = null)

public static QualityMonitor get(String name, Output<String> id, QualityMonitorState state, CustomResourceOptions options)

resources:
  _:
    type: databricks:QualityMonitor
    get:
      id: ${id}

- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
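For example, a minimal lookup in Python might look like this, where the ID is a hypothetical full table name:

import pulumi_databricks as databricks

existing = databricks.QualityMonitor.get("existingMonitor", "main.sales.orders")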
- AssetsDir string
- The directory to store the monitoring assets (Eg. Dashboard and Metric Tables)
- BaselineTable stringName 
- Name of the baseline table from which drift metrics are computed from.Columns in the monitored table should also be present in the baseline table.
- CustomMetrics List<QualityMonitor Custom Metric> 
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- DashboardId string
- The ID of the generated dashboard.
- DataClassification QualityConfig Monitor Data Classification Config 
- The data classification config for the monitor
- DriftMetrics stringTable Name 
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- InferenceLog QualityMonitor Inference Log 
- Configuration for the inference log monitor
- LatestMonitor stringFailure Msg 
- MonitorId string
- ID of this monitor is the same as the full table name of the format {catalog}.{schema_name}.{table_name}
- MonitorVersion string
- The version of the monitor config (e.g. 1,2,3). If negative, the monitor may be corrupted
- Notifications
QualityMonitor Notifications 
- The notification settings for the monitor. The following optional blocks are supported, each consisting of the single string array field with name email_addressescontaining a list of emails to notify:
- OutputSchema stringName 
- Schema where output metric tables are created
- ProfileMetrics stringTable Name 
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- Schedule
QualityMonitor Schedule 
- The schedule for automatically updating and refreshing metric tables. This block consists of following fields:
- SkipBuiltin boolDashboard 
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation).
- SlicingExprs List<string>
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- Snapshot
QualityMonitor Snapshot 
- Configuration for monitoring snapshot tables.
- Status string
- Status of the Monitor
- TableName string
- The full name of the table to attach the monitor too. Its of the format {catalog}.{schema}.{tableName}
- TimeSeries QualityMonitor Time Series 
- Configuration for monitoring timeseries tables.
- WarehouseId string
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation)
- AssetsDir string
- The directory to store the monitoring assets (e.g., dashboard and metric tables).
- BaselineTableName string
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- CustomMetrics []QualityMonitorCustomMetricArgs
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- DashboardId string
- The ID of the generated dashboard.
- DataClassificationConfig QualityMonitorDataClassificationConfigArgs
- The data classification config for the monitor.
- DriftMetricsTableName string
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- InferenceLog QualityMonitorInferenceLogArgs
- Configuration for the inference log monitor.
- LatestMonitorFailureMsg string
- MonitorId string
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}.
- MonitorVersion string
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- Notifications QualityMonitorNotificationsArgs
- The notification settings for the monitor. The following optional blocks are supported, each consisting of a single string-array field named email_addresses containing a list of emails to notify:
- OutputSchemaName string
- Schema where output metric tables are created.
- ProfileMetricsTableName string
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- Schedule QualityMonitorScheduleArgs
- The schedule for automatically updating and refreshing metric tables. This block consists of the following fields:
- SkipBuiltinDashboard bool
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- SlicingExprs []string
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- Snapshot QualityMonitorSnapshotArgs
- Configuration for monitoring snapshot tables.
- Status string
- Status of the monitor.
- TableName string
- The full name of the table to attach the monitor to. It is of the format {catalog}.{schema}.{tableName}.
- TimeSeries QualityMonitorTimeSeriesArgs
- Configuration for monitoring timeseries tables.
- WarehouseId string
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
- assetsDir String
- The directory to store the monitoring assets (e.g., dashboard and metric tables).
- baselineTableName String
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- customMetrics List<QualityMonitorCustomMetric>
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- dashboardId String
- The ID of the generated dashboard.
- dataClassificationConfig QualityMonitorDataClassificationConfig
- The data classification config for the monitor.
- driftMetricsTableName String
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- inferenceLog QualityMonitorInferenceLog
- Configuration for the inference log monitor.
- latestMonitorFailureMsg String
- monitorId String
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}.
- monitorVersion String
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- notifications QualityMonitorNotifications
- The notification settings for the monitor. The following optional blocks are supported, each consisting of a single string-array field named email_addresses containing a list of emails to notify:
- outputSchemaName String
- Schema where output metric tables are created.
- profileMetricsTableName String
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- schedule QualityMonitorSchedule
- The schedule for automatically updating and refreshing metric tables. This block consists of the following fields:
- skipBuiltinDashboard Boolean
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- slicingExprs List<String>
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- snapshot QualityMonitorSnapshot
- Configuration for monitoring snapshot tables.
- status String
- Status of the monitor.
- tableName String
- The full name of the table to attach the monitor to. It is of the format {catalog}.{schema}.{tableName}.
- timeSeries QualityMonitorTimeSeries
- Configuration for monitoring timeseries tables.
- warehouseId String
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
- assetsDir string
- The directory to store the monitoring assets (e.g., dashboard and metric tables).
- baselineTableName string
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- customMetrics QualityMonitorCustomMetric[]
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- dashboardId string
- The ID of the generated dashboard.
- dataClassificationConfig QualityMonitorDataClassificationConfig
- The data classification config for the monitor.
- driftMetricsTableName string
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- inferenceLog QualityMonitorInferenceLog
- Configuration for the inference log monitor.
- latestMonitorFailureMsg string
- monitorId string
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}.
- monitorVersion string
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- notifications QualityMonitorNotifications
- The notification settings for the monitor. The following optional blocks are supported, each consisting of a single string-array field named email_addresses containing a list of emails to notify:
- outputSchemaName string
- Schema where output metric tables are created.
- profileMetricsTableName string
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- schedule QualityMonitorSchedule
- The schedule for automatically updating and refreshing metric tables. This block consists of the following fields:
- skipBuiltinDashboard boolean
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- slicingExprs string[]
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- snapshot QualityMonitorSnapshot
- Configuration for monitoring snapshot tables.
- status string
- Status of the monitor.
- tableName string
- The full name of the table to attach the monitor to. It is of the format {catalog}.{schema}.{tableName}.
- timeSeries QualityMonitorTimeSeries
- Configuration for monitoring timeseries tables.
- warehouseId string
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
- assets_dir str
- The directory to store the monitoring assets (e.g., dashboard and metric tables).
- baseline_table_name str
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- custom_metrics Sequence[QualityMonitorCustomMetricArgs]
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- dashboard_id str
- The ID of the generated dashboard.
- data_classification_config QualityMonitorDataClassificationConfigArgs
- The data classification config for the monitor.
- drift_metrics_table_name str
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- inference_log QualityMonitorInferenceLogArgs
- Configuration for the inference log monitor.
- latest_monitor_failure_msg str
- monitor_id str
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}.
- monitor_version str
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- notifications QualityMonitorNotificationsArgs
- The notification settings for the monitor. The following optional blocks are supported, each consisting of a single string-array field named email_addresses containing a list of emails to notify:
- output_schema_name str
- Schema where output metric tables are created.
- profile_metrics_table_name str
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- schedule QualityMonitorScheduleArgs
- The schedule for automatically updating and refreshing metric tables. This block consists of the following fields:
- skip_builtin_dashboard bool
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- slicing_exprs Sequence[str]
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- snapshot QualityMonitorSnapshotArgs
- Configuration for monitoring snapshot tables.
- status str
- Status of the monitor.
- table_name str
- The full name of the table to attach the monitor to. It is of the format {catalog}.{schema}.{tableName}.
- time_series QualityMonitorTimeSeriesArgs
- Configuration for monitoring timeseries tables.
- warehouse_id str
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
- assetsDir String
- The directory to store the monitoring assets (e.g., dashboard and metric tables).
- baselineTableName String
- Name of the baseline table from which drift metrics are computed. Columns in the monitored table should also be present in the baseline table.
- customMetrics List<Property Map>
- Custom metrics to compute on the monitored table. These can be aggregate metrics, derived metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across time windows).
- dashboardId String
- The ID of the generated dashboard.
- dataClassificationConfig Property Map
- The data classification config for the monitor.
- driftMetricsTableName String
- The full name of the drift metrics table. Format: catalog_name.schema_name.table_name.
- inferenceLog Property Map
- Configuration for the inference log monitor.
- latestMonitorFailureMsg String
- monitorId String
- The ID of this monitor, which is the same as the full table name, in the format {catalog}.{schema_name}.{table_name}.
- monitorVersion String
- The version of the monitor config (e.g., 1, 2, 3). If negative, the monitor may be corrupted.
- notifications Property Map
- The notification settings for the monitor. The following optional blocks are supported, each consisting of a single string-array field named email_addresses containing a list of emails to notify:
- outputSchemaName String
- Schema where output metric tables are created.
- profileMetricsTableName String
- The full name of the profile metrics table. Format: catalog_name.schema_name.table_name.
- schedule Property Map
- The schedule for automatically updating and refreshing metric tables. This block consists of the following fields:
- skipBuiltinDashboard Boolean
- Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation.)
- slicingExprs List<String>
- List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
- snapshot Property Map
- Configuration for monitoring snapshot tables.
- status String
- Status of the monitor.
- tableName String
- The full name of the table to attach the monitor to. It is of the format {catalog}.{schema}.{tableName}.
- timeSeries Property Map
- Configuration for monitoring timeseries tables.
- warehouseId String
- Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation.)
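To show how several of these optional arguments fit together, here is a minimal TypeScript sketch of a snapshot-type monitor; all catalog, schema, table, and warehouse names are hypothetical:
import * as databricks from "@pulumi/databricks";
// A snapshot monitor with slicing expressions, the built-in dashboard
// skipped, and a specific warehouse for dashboard creation.
const snapshotMonitor = new databricks.QualityMonitor("snapshotMonitor", {
    tableName: "main.sandbox.orders",                  // hypothetical table
    assetsDir: "/Shared/quality_monitoring/orders",
    outputSchemaName: "main.sandbox",
    snapshot: {},                                      // snapshot block has no fields
    slicingExprs: ["region", "status = 'open'"],
    skipBuiltinDashboard: true,
    warehouseId: "1234567890abcdef",                   // hypothetical warehouse ID
});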
Supporting Types
QualityMonitorCustomMetric, QualityMonitorCustomMetricArgs        
- Definition string
- The definition of the custom metric.
- InputColumns List<string>
- Columns on the monitored table to apply the custom metrics to.
- Name string
- Name of the custom metric.
- OutputDataType string
- The output type of the custom metric.
- Type string
- The type of the custom metric.
- Definition string
- The definition of the custom metric.
- InputColumns []string
- Columns on the monitored table to apply the custom metrics to.
- Name string
- Name of the custom metric.
- OutputDataType string
- The output type of the custom metric.
- Type string
- The type of the custom metric.
- definition String
- The definition of the custom metric.
- inputColumns List<String>
- Columns on the monitored table to apply the custom metrics to.
- name String
- Name of the custom metric.
- outputDataType String
- The output type of the custom metric.
- type String
- The type of the custom metric.
- definition string
- The definition of the custom metric.
- inputColumns string[]
- Columns on the monitored table to apply the custom metrics to.
- name string
- Name of the custom metric.
- outputDataType string
- The output type of the custom metric.
- type string
- The type of the custom metric.
- definition str
- The definition of the custom metric.
- input_columns Sequence[str]
- Columns on the monitored table to apply the custom metrics to.
- name str
- Name of the custom metric.
- output_data_type str
- The output type of the custom metric.
- type str
- The type of the custom metric.
- definition String
- The definition of the custom metric.
- inputColumns List<String>
- Columns on the monitored table to apply the custom metrics to.
- name String
- Name of the custom metric.
- outputDataType String
- The output type of the custom metric.
- type String
- The type of the custom metric.
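A TypeScript sketch of one aggregate custom metric follows; the metric name, SQL definition, and enum-style type value are illustrative assumptions based on Databricks Lakehouse Monitoring conventions, not values confirmed by this page:
import * as databricks from "@pulumi/databricks";
// Hypothetical monitor with a single aggregate custom metric over an
// "amount" column of the monitored table.
const withCustomMetric = new databricks.QualityMonitor("withCustomMetric", {
    tableName: "main.sandbox.orders",                  // hypothetical table
    assetsDir: "/Shared/quality_monitoring/orders",
    outputSchemaName: "main.sandbox",
    snapshot: {},
    customMetrics: [{
        name: "avg_amount",                            // hypothetical metric name
        definition: "avg(amount)",                     // hypothetical SQL definition
        inputColumns: ["amount"],
        outputDataType: "double",                      // assumed output type string
        type: "CUSTOM_METRIC_TYPE_AGGREGATE",          // assumed metric-type value
    }],
});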
QualityMonitorDataClassificationConfig, QualityMonitorDataClassificationConfigArgs          
- Enabled bool
- Enabled bool
- enabled Boolean
- enabled boolean
- enabled bool
- enabled Boolean
QualityMonitorInferenceLog, QualityMonitorInferenceLogArgs        
- Granularities List<string>
- List of granularities to use when aggregating data into time windows based on their timestamp.
- ModelIdCol string
- Column of the model ID or version.
- PredictionCol string
- Column of the model prediction.
- ProblemType string
- Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION.
- TimestampCol string
- Column of the timestamp of predictions.
- LabelCol string
- Column of the model label.
- PredictionProbaCol string
- Column of the model prediction probabilities.
- Granularities []string
- List of granularities to use when aggregating data into time windows based on their timestamp.
- ModelIdCol string
- Column of the model ID or version.
- PredictionCol string
- Column of the model prediction.
- ProblemType string
- Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION.
- TimestampCol string
- Column of the timestamp of predictions.
- LabelCol string
- Column of the model label.
- PredictionProbaCol string
- Column of the model prediction probabilities.
- granularities List<String>
- List of granularities to use when aggregating data into time windows based on their timestamp.
- modelIdCol String
- Column of the model ID or version.
- predictionCol String
- Column of the model prediction.
- problemType String
- Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION.
- timestampCol String
- Column of the timestamp of predictions.
- labelCol String
- Column of the model label.
- predictionProbaCol String
- Column of the model prediction probabilities.
- granularities string[]
- List of granularities to use when aggregating data into time windows based on their timestamp.
- modelIdCol string
- Column of the model ID or version.
- predictionCol string
- Column of the model prediction.
- problemType string
- Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION.
- timestampCol string
- Column of the timestamp of predictions.
- labelCol string
- Column of the model label.
- predictionProbaCol string
- Column of the model prediction probabilities.
- granularities Sequence[str]
- List of granularities to use when aggregating data into time windows based on their timestamp.
- model_id_col str
- Column of the model ID or version.
- prediction_col str
- Column of the model prediction.
- problem_type str
- Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION.
- timestamp_col str
- Column of the timestamp of predictions.
- label_col str
- Column of the model label.
- prediction_proba_col str
- Column of the model prediction probabilities.
- granularities List<String>
- List of granularities to use when aggregating data into time windows based on their timestamp.
- modelIdCol String
- Column of the model ID or version.
- predictionCol String
- Column of the model prediction.
- problemType String
- Problem type the model aims to solve. Either PROBLEM_TYPE_CLASSIFICATION or PROBLEM_TYPE_REGRESSION.
- timestampCol String
- Column of the timestamp of predictions.
- labelCol String
- Column of the model label.
- predictionProbaCol String
- Column of the model prediction probabilities.
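Putting these fields together, here is a minimal TypeScript sketch of an inference-log monitor; the table and column names are hypothetical, while the field names and the problem-type value come from the listing above:
import * as databricks from "@pulumi/databricks";
// Hypothetical inference-log monitor for a model-serving request table.
const inferenceMonitor = new databricks.QualityMonitor("inferenceMonitor", {
    tableName: "main.sandbox.model_logs",              // hypothetical table
    assetsDir: "/Shared/quality_monitoring/model_logs",
    outputSchemaName: "main.sandbox",
    inferenceLog: {
        granularities: ["1 day"],
        timestampCol: "ts",                            // hypothetical column names
        modelIdCol: "model_version",
        predictionCol: "prediction",
        problemType: "PROBLEM_TYPE_REGRESSION",
        labelCol: "actual",                            // optional ground-truth column
    },
});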
QualityMonitorNotifications, QualityMonitorNotificationsArgs      
- OnFailure QualityMonitorNotificationsOnFailure
- Who to send notifications to on monitor failure.
- OnNewClassificationTagDetected QualityMonitorNotificationsOnNewClassificationTagDetected
- Who to send notifications to when new data classification tags are detected.
- OnFailure QualityMonitorNotificationsOnFailure
- Who to send notifications to on monitor failure.
- OnNewClassificationTagDetected QualityMonitorNotificationsOnNewClassificationTagDetected
- Who to send notifications to when new data classification tags are detected.
- onFailure QualityMonitorNotificationsOnFailure
- Who to send notifications to on monitor failure.
- onNewClassificationTagDetected QualityMonitorNotificationsOnNewClassificationTagDetected
- Who to send notifications to when new data classification tags are detected.
- onFailure QualityMonitorNotificationsOnFailure
- Who to send notifications to on monitor failure.
- onNewClassificationTagDetected QualityMonitorNotificationsOnNewClassificationTagDetected
- Who to send notifications to when new data classification tags are detected.
- on_failure QualityMonitorNotificationsOnFailure
- Who to send notifications to on monitor failure.
- on_new_classification_tag_detected QualityMonitorNotificationsOnNewClassificationTagDetected
- Who to send notifications to when new data classification tags are detected.
- onFailure Property Map
- Who to send notifications to on monitor failure.
- onNewClassificationTagDetected Property Map
- Who to send notifications to when new data classification tags are detected.
QualityMonitorNotificationsOnFailure, QualityMonitorNotificationsOnFailureArgs          
- EmailAddresses List<string>
- EmailAddresses []string
- emailAddresses List<String>
- emailAddresses string[]
- email_addresses Sequence[str]
- emailAddresses List<String>
QualityMonitorNotificationsOnNewClassificationTagDetected, QualityMonitorNotificationsOnNewClassificationTagDetectedArgs                
- EmailAddresses List<string>
- EmailAddresses []string
- emailAddresses List<String>
- emailAddresses string[]
- email_addresses Sequence[str]
- emailAddresses List<String>
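Both notification blocks take only the email_addresses list, as in this minimal TypeScript sketch; the recipient addresses and table names are hypothetical:
import * as databricks from "@pulumi/databricks";
// Hypothetical monitor that emails one list on failure and another when
// new data classification tags are detected.
const notifiedMonitor = new databricks.QualityMonitor("notifiedMonitor", {
    tableName: "main.sandbox.orders",                  // hypothetical table
    assetsDir: "/Shared/quality_monitoring/orders",
    outputSchemaName: "main.sandbox",
    snapshot: {},
    dataClassificationConfig: { enabled: true },
    notifications: {
        onFailure: { emailAddresses: ["data-team@example.com"] },
        onNewClassificationTagDetected: { emailAddresses: ["governance@example.com"] },
    },
});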
QualityMonitorSchedule, QualityMonitorScheduleArgs      
- QuartzCronExpression string
- A string expression that determines when to run the monitor. See Quartz documentation for examples.
- TimezoneId string
- A string with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
- PauseStatus string
- QuartzCronExpression string
- A string expression that determines when to run the monitor. See Quartz documentation for examples.
- TimezoneId string
- A string with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
- PauseStatus string
- quartzCronExpression String
- A string expression that determines when to run the monitor. See Quartz documentation for examples.
- timezoneId String
- A string with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
- pauseStatus String
- quartzCronExpression string
- A string expression that determines when to run the monitor. See Quartz documentation for examples.
- timezoneId string
- A string with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
- pauseStatus string
- quartz_cron_expression str
- A string expression that determines when to run the monitor. See Quartz documentation for examples.
- timezone_id str
- A string with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
- pause_status str
- quartzCronExpression String
- A string expression that determines when to run the monitor. See Quartz documentation for examples.
- timezoneId String
- A string with the timezone ID (e.g., PST) in which to evaluate the Quartz expression.
- pauseStatus String
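As a minimal TypeScript sketch, the schedule below refreshes the metric tables daily at noon using a standard Quartz cron expression (second, minute, hour, day-of-month, month, day-of-week); the table names are hypothetical:
import * as databricks from "@pulumi/databricks";
// Hypothetical monitor whose metric tables refresh every day at 12:00 UTC.
const scheduledMonitor = new databricks.QualityMonitor("scheduledMonitor", {
    tableName: "main.sandbox.orders",                  // hypothetical table
    assetsDir: "/Shared/quality_monitoring/orders",
    outputSchemaName: "main.sandbox",
    snapshot: {},
    schedule: {
        quartzCronExpression: "0 0 12 * * ?",          // daily at noon
        timezoneId: "UTC",
    },
});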
QualityMonitorTimeSeries, QualityMonitorTimeSeriesArgs        
- Granularities List<string>
- List of granularities to use when aggregating data into time windows based on their timestamp.
- TimestampCol string
- Column of the timestamp of predictions
- Granularities []string
- List of granularities to use when aggregating data into time windows based on their timestamp.
- TimestampCol string
- Column of the timestamp of predictions
- granularities List<String>
- List of granularities to use when aggregating data into time windows based on their timestamp.
- timestampCol String
- Column of the timestamp of predictions
- granularities string[]
- List of granularities to use when aggregating data into time windows based on their timestamp.
- timestampCol string
- Column of the timestamp of predictions
- granularities Sequence[str]
- List of granularities to use when aggregating data into time windows based on their timestamp.
- timestamp_col str
- Column of the timestamp of predictions
- granularities List<String>
- List of granularities to use when aggregating data into time windows based on their timestamp.
- timestampCol String
- Column of the timestamp of predictions
Package Details
- Repository
- databricks pulumi/pulumi-databricks
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the databricks Terraform Provider.