yandex.MdbKafkaCluster
Explore with Pulumi AI
Manages a Kafka cluster within the Yandex.Cloud. For more information, see the official documentation.
Example Usage
Example of creating a Single Node Kafka.
using Pulumi;
using Yandex = Pulumi.Yandex;
// Example stack: a single-node Managed Kafka cluster in Yandex.Cloud.
class MyStack : Stack
{
    public MyStack()
    {
        // Network that will host the cluster.
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        // One subnet in a single availability zone; the cluster's only broker lives here.
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.5.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        // Kafka cluster: one broker (BrokersCount = 1, one zone), so no ZooKeeper subcluster is configured.
        var fooMdbKafkaCluster = new Yandex.MdbKafkaCluster("fooMdbKafkaCluster", new Yandex.MdbKafkaClusterArgs
        {
            Config = new Yandex.Inputs.MdbKafkaClusterConfigArgs
            {
                AssignPublicIp = false,
                BrokersCount = 1,
                Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
                {
                    // Broker-level Kafka settings; note the numeric settings are passed as strings.
                    KafkaConfig = new Yandex.Inputs.MdbKafkaClusterConfigKafkaKafkaConfigArgs
                    {
                        CompressionType = "COMPRESSION_TYPE_ZSTD",
                        DefaultReplicationFactor = "1",
                        LogFlushIntervalMessages = "1024",
                        LogFlushIntervalMs = "1000",
                        LogFlushSchedulerIntervalMs = "1000",
                        LogPreallocate = true,
                        LogRetentionBytes = "1073741824",
                        LogRetentionHours = "168",
                        LogRetentionMinutes = "10080",
                        LogRetentionMs = "86400000",
                        LogSegmentBytes = "134217728",
                        NumPartitions = "10",
                    },
                    // Compute and storage for the broker host.
                    Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
                    {
                        DiskSize = 32,
                        DiskTypeId = "network-ssd",
                        ResourcePresetId = "s2.micro",
                    },
                },
                SchemaRegistry = false,
                UnmanagedTopics = false,
                Version = "2.8",
                Zones = 
                {
                    "ru-central1-a",
                },
            },
            Environment = "PRESTABLE",
            NetworkId = fooVpcNetwork.Id,
            SubnetIds = 
            {
                fooVpcSubnet.Id,
            },
            // Two accounts: a producer for topic "input", and a worker that
            // consumes "input" and produces to "output".
            Users = 
            {
                new Yandex.Inputs.MdbKafkaClusterUserArgs
                {
                    Name = "producer-application",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_PRODUCER",
                            TopicName = "input",
                        },
                    },
                },
                new Yandex.Inputs.MdbKafkaClusterUserArgs
                {
                    Name = "worker",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_CONSUMER",
                            TopicName = "input",
                        },
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_PRODUCER",
                            TopicName = "output",
                        },
                    },
                },
            },
        });
    }
}
package main
import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.5.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbKafkaCluster(ctx, "fooMdbKafkaCluster", &yandex.MdbKafkaClusterArgs{
			Config: &MdbKafkaClusterConfigArgs{
				AssignPublicIp: pulumi.Bool(false),
				BrokersCount:   pulumi.Int(1),
				Kafka: &MdbKafkaClusterConfigKafkaArgs{
					KafkaConfig: &MdbKafkaClusterConfigKafkaKafkaConfigArgs{
						CompressionType:             pulumi.String("COMPRESSION_TYPE_ZSTD"),
						DefaultReplicationFactor:    pulumi.String("1"),
						LogFlushIntervalMessages:    pulumi.String("1024"),
						LogFlushIntervalMs:          pulumi.String("1000"),
						LogFlushSchedulerIntervalMs: pulumi.String("1000"),
						LogPreallocate:              pulumi.Bool(true),
						LogRetentionBytes:           pulumi.String("1073741824"),
						LogRetentionHours:           pulumi.String("168"),
						LogRetentionMinutes:         pulumi.String("10080"),
						LogRetentionMs:              pulumi.String("86400000"),
						LogSegmentBytes:             pulumi.String("134217728"),
						NumPartitions:               pulumi.String("10"),
					},
					Resources: &MdbKafkaClusterConfigKafkaResourcesArgs{
						DiskSize:         pulumi.Int(32),
						DiskTypeId:       pulumi.String("network-ssd"),
						ResourcePresetId: pulumi.String("s2.micro"),
					},
				},
				SchemaRegistry:  pulumi.Bool(false),
				UnmanagedTopics: pulumi.Bool(false),
				Version:         pulumi.String("2.8"),
				Zones: pulumi.StringArray{
					pulumi.String("ru-central1-a"),
				},
			},
			Environment: pulumi.String("PRESTABLE"),
			NetworkId:   fooVpcNetwork.ID(),
			SubnetIds: pulumi.StringArray{
				fooVpcSubnet.ID(),
			},
			Users: MdbKafkaClusterUserArray{
				&MdbKafkaClusterUserArgs{
					Name:     pulumi.String("producer-application"),
					Password: pulumi.String("password"),
					Permissions: MdbKafkaClusterUserPermissionArray{
						&MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_PRODUCER"),
							TopicName: pulumi.String("input"),
						},
					},
				},
				&MdbKafkaClusterUserArgs{
					Name:     pulumi.String("worker"),
					Password: pulumi.String("password"),
					Permissions: MdbKafkaClusterUserPermissionArray{
						&MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_CONSUMER"),
							TopicName: pulumi.String("input"),
						},
						&MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_PRODUCER"),
							TopicName: pulumi.String("output"),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Coming soon!
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";

// Example program: a single-node Managed Kafka cluster in Yandex.Cloud.

// Network that will host the cluster.
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
// One subnet in a single availability zone; the cluster's only broker lives here.
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.5.0.0/24"],
    zone: "ru-central1-a",
});
// Kafka cluster: one broker in one zone, so no ZooKeeper subcluster is configured.
const fooMdbKafkaCluster = new yandex.MdbKafkaCluster("foo", {
    config: {
        assignPublicIp: false,
        brokersCount: 1,
        kafka: {
            // Broker-level Kafka settings; the numeric settings are passed as
            // plain integer strings (scientific notation like "1.073741824e+09"
            // is not a valid value for these int64 fields).
            kafkaConfig: {
                compressionType: "COMPRESSION_TYPE_ZSTD",
                defaultReplicationFactor: "1",
                logFlushIntervalMessages: "1024",
                logFlushIntervalMs: "1000",
                logFlushSchedulerIntervalMs: "1000",
                logPreallocate: true,
                logRetentionBytes: "1073741824",
                logRetentionHours: "168",
                logRetentionMinutes: "10080",
                logRetentionMs: "86400000",
                logSegmentBytes: "134217728",
                numPartitions: "10",
            },
            // Compute and storage for the broker host.
            resources: {
                diskSize: 32,
                diskTypeId: "network-ssd",
                resourcePresetId: "s2.micro",
            },
        },
        schemaRegistry: false,
        unmanagedTopics: false,
        version: "2.8",
        zones: ["ru-central1-a"],
    },
    environment: "PRESTABLE",
    networkId: fooVpcNetwork.id,
    subnetIds: [fooVpcSubnet.id],
    // Two accounts: a producer for topic "input", and a worker that
    // consumes "input" and produces to "output".
    users: [
        {
            name: "producer-application",
            password: "password",
            permissions: [{
                role: "ACCESS_ROLE_PRODUCER",
                topicName: "input",
            }],
        },
        {
            name: "worker",
            password: "password",
            permissions: [
                {
                    role: "ACCESS_ROLE_CONSUMER",
                    topicName: "input",
                },
                {
                    role: "ACCESS_ROLE_PRODUCER",
                    topicName: "output",
                },
            ],
        },
    ],
});
import pulumi
import pulumi_yandex as yandex

# Example program: a single-node Managed Kafka cluster in Yandex.Cloud.

# Network that will host the cluster.
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
# One subnet in a single availability zone; the cluster's only broker lives here.
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.5.0.0/24"],
    zone="ru-central1-a")
# Kafka cluster: one broker in one zone, so no ZooKeeper subcluster is configured.
foo_mdb_kafka_cluster = yandex.MdbKafkaCluster("fooMdbKafkaCluster",
    config=yandex.MdbKafkaClusterConfigArgs(
        assign_public_ip=False,
        brokers_count=1,
        kafka=yandex.MdbKafkaClusterConfigKafkaArgs(
            # Broker-level Kafka settings; the numeric settings are passed as strings.
            kafka_config=yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs(
                compression_type="COMPRESSION_TYPE_ZSTD",
                default_replication_factor="1",
                log_flush_interval_messages="1024",
                log_flush_interval_ms="1000",
                log_flush_scheduler_interval_ms="1000",
                log_preallocate=True,
                log_retention_bytes="1073741824",
                log_retention_hours="168",
                log_retention_minutes="10080",
                log_retention_ms="86400000",
                log_segment_bytes="134217728",
                num_partitions="10",
            ),
            # Compute and storage for the broker host.
            resources=yandex.MdbKafkaClusterConfigKafkaResourcesArgs(
                disk_size=32,
                disk_type_id="network-ssd",
                resource_preset_id="s2.micro",
            ),
        ),
        schema_registry=False,
        unmanaged_topics=False,
        version="2.8",
        zones=["ru-central1-a"],
    ),
    environment="PRESTABLE",
    network_id=foo_vpc_network.id,
    subnet_ids=[foo_vpc_subnet.id],
    # Two accounts: a producer for topic "input", and a worker that
    # consumes "input" and produces to "output".
    users=[
        yandex.MdbKafkaClusterUserArgs(
            name="producer-application",
            password="password",
            permissions=[yandex.MdbKafkaClusterUserPermissionArgs(
                role="ACCESS_ROLE_PRODUCER",
                topic_name="input",
            )],
        ),
        yandex.MdbKafkaClusterUserArgs(
            name="worker",
            password="password",
            permissions=[
                yandex.MdbKafkaClusterUserPermissionArgs(
                    role="ACCESS_ROLE_CONSUMER",
                    topic_name="input",
                ),
                yandex.MdbKafkaClusterUserPermissionArgs(
                    role="ACCESS_ROLE_PRODUCER",
                    topic_name="output",
                ),
            ],
        ),
    ])
Coming soon!
Example of creating a highly available (HA) Kafka cluster with two brokers per availability zone (6 brokers + 3 ZooKeeper hosts).
using Pulumi;
using Yandex = Pulumi.Yandex;
// Example stack: a highly available Kafka cluster — two brokers in each of
// three availability zones (6 brokers) plus a 3-host ZooKeeper subcluster.
class MyStack : Stack
{
    public MyStack()
    {
        // Network that will host the cluster.
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        // One subnet per availability zone so brokers can be spread across zones.
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.1.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        var bar = new Yandex.VpcSubnet("bar", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.2.0.0/24",
            },
            Zone = "ru-central1-b",
        });
        var baz = new Yandex.VpcSubnet("baz", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.3.0.0/24",
            },
            Zone = "ru-central1-c",
        });
        // BrokersCount = 2 per zone x 3 zones = 6 brokers; a multi-broker
        // cluster also requires the ZooKeeper subcluster configured below.
        var fooMdbKafkaCluster = new Yandex.MdbKafkaCluster("fooMdbKafkaCluster", new Yandex.MdbKafkaClusterArgs
        {
            Config = new Yandex.Inputs.MdbKafkaClusterConfigArgs
            {
                AssignPublicIp = true,
                BrokersCount = 2,
                Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
                {
                    // Broker-level Kafka settings; note the numeric settings are passed as strings.
                    KafkaConfig = new Yandex.Inputs.MdbKafkaClusterConfigKafkaKafkaConfigArgs
                    {
                        CompressionType = "COMPRESSION_TYPE_ZSTD",
                        DefaultReplicationFactor = "6",
                        LogFlushIntervalMessages = "1024",
                        LogFlushIntervalMs = "1000",
                        LogFlushSchedulerIntervalMs = "1000",
                        LogPreallocate = true,
                        LogRetentionBytes = "1073741824",
                        LogRetentionHours = "168",
                        LogRetentionMinutes = "10080",
                        LogRetentionMs = "86400000",
                        LogSegmentBytes = "134217728",
                        NumPartitions = "10",
                    },
                    // Compute and storage for each broker host.
                    Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
                    {
                        DiskSize = 128,
                        DiskTypeId = "network-ssd",
                        ResourcePresetId = "s2.medium",
                    },
                },
                SchemaRegistry = false,
                UnmanagedTopics = false,
                Version = "2.8",
                Zones = 
                {
                    "ru-central1-a",
                    "ru-central1-b",
                    "ru-central1-c",
                },
                // Compute and storage for each ZooKeeper host.
                Zookeeper = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperArgs
                {
                    Resources = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperResourcesArgs
                    {
                        DiskSize = 20,
                        DiskTypeId = "network-ssd",
                        ResourcePresetId = "s2.micro",
                    },
                },
            },
            Environment = "PRESTABLE",
            NetworkId = fooVpcNetwork.Id,
            SubnetIds = 
            {
                fooVpcSubnet.Id,
                bar.Id,
                baz.Id,
            },
            // Two accounts: a producer for topic "input", and a worker that
            // consumes "input" and produces to "output".
            Users = 
            {
                new Yandex.Inputs.MdbKafkaClusterUserArgs
                {
                    Name = "producer-application",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_PRODUCER",
                            TopicName = "input",
                        },
                    },
                },
                new Yandex.Inputs.MdbKafkaClusterUserArgs
                {
                    Name = "worker",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_CONSUMER",
                            TopicName = "input",
                        },
                        new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                        {
                            Role = "ACCESS_ROLE_PRODUCER",
                            TopicName = "output",
                        },
                    },
                },
            },
        });
    }
}
package main
import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.1.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		bar, err := yandex.NewVpcSubnet(ctx, "bar", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.2.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-b"),
		})
		if err != nil {
			return err
		}
		baz, err := yandex.NewVpcSubnet(ctx, "baz", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.3.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-c"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbKafkaCluster(ctx, "fooMdbKafkaCluster", &yandex.MdbKafkaClusterArgs{
			Config: &MdbKafkaClusterConfigArgs{
				AssignPublicIp: pulumi.Bool(true),
				BrokersCount:   pulumi.Int(2),
				Kafka: &MdbKafkaClusterConfigKafkaArgs{
					KafkaConfig: &MdbKafkaClusterConfigKafkaKafkaConfigArgs{
						CompressionType:             pulumi.String("COMPRESSION_TYPE_ZSTD"),
						DefaultReplicationFactor:    pulumi.String("6"),
						LogFlushIntervalMessages:    pulumi.String("1024"),
						LogFlushIntervalMs:          pulumi.String("1000"),
						LogFlushSchedulerIntervalMs: pulumi.String("1000"),
						LogPreallocate:              pulumi.Bool(true),
						LogRetentionBytes:           pulumi.String("1073741824"),
						LogRetentionHours:           pulumi.String("168"),
						LogRetentionMinutes:         pulumi.String("10080"),
						LogRetentionMs:              pulumi.String("86400000"),
						LogSegmentBytes:             pulumi.String("134217728"),
						NumPartitions:               pulumi.String("10"),
					},
					Resources: &MdbKafkaClusterConfigKafkaResourcesArgs{
						DiskSize:         pulumi.Int(128),
						DiskTypeId:       pulumi.String("network-ssd"),
						ResourcePresetId: pulumi.String("s2.medium"),
					},
				},
				SchemaRegistry:  pulumi.Bool(false),
				UnmanagedTopics: pulumi.Bool(false),
				Version:         pulumi.String("2.8"),
				Zones: pulumi.StringArray{
					pulumi.String("ru-central1-a"),
					pulumi.String("ru-central1-b"),
					pulumi.String("ru-central1-c"),
				},
				Zookeeper: &MdbKafkaClusterConfigZookeeperArgs{
					Resources: &MdbKafkaClusterConfigZookeeperResourcesArgs{
						DiskSize:         pulumi.Int(20),
						DiskTypeId:       pulumi.String("network-ssd"),
						ResourcePresetId: pulumi.String("s2.micro"),
					},
				},
			},
			Environment: pulumi.String("PRESTABLE"),
			NetworkId:   fooVpcNetwork.ID(),
			SubnetIds: pulumi.StringArray{
				fooVpcSubnet.ID(),
				bar.ID(),
				baz.ID(),
			},
			Users: MdbKafkaClusterUserArray{
				&MdbKafkaClusterUserArgs{
					Name:     pulumi.String("producer-application"),
					Password: pulumi.String("password"),
					Permissions: MdbKafkaClusterUserPermissionArray{
						&MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_PRODUCER"),
							TopicName: pulumi.String("input"),
						},
					},
				},
				&MdbKafkaClusterUserArgs{
					Name:     pulumi.String("worker"),
					Password: pulumi.String("password"),
					Permissions: MdbKafkaClusterUserPermissionArray{
						&MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_CONSUMER"),
							TopicName: pulumi.String("input"),
						},
						&MdbKafkaClusterUserPermissionArgs{
							Role:      pulumi.String("ACCESS_ROLE_PRODUCER"),
							TopicName: pulumi.String("output"),
						},
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Coming soon!
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";

// Example program: a highly available Kafka cluster — two brokers in each of
// three availability zones (6 brokers) plus a 3-host ZooKeeper subcluster.

// Network that will host the cluster.
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
// One subnet per availability zone so brokers can be spread across zones.
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.1.0.0/24"],
    zone: "ru-central1-a",
});
const bar = new yandex.VpcSubnet("bar", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.2.0.0/24"],
    zone: "ru-central1-b",
});
const baz = new yandex.VpcSubnet("baz", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.3.0.0/24"],
    zone: "ru-central1-c",
});
// brokersCount = 2 per zone x 3 zones = 6 brokers; a multi-broker cluster
// also requires the ZooKeeper subcluster configured below.
const fooMdbKafkaCluster = new yandex.MdbKafkaCluster("foo", {
    config: {
        assignPublicIp: true,
        brokersCount: 2,
        kafka: {
            // Broker-level Kafka settings; the numeric settings are passed as
            // plain integer strings (scientific notation like "1.073741824e+09"
            // is not a valid value for these int64 fields).
            kafkaConfig: {
                compressionType: "COMPRESSION_TYPE_ZSTD",
                defaultReplicationFactor: "6",
                logFlushIntervalMessages: "1024",
                logFlushIntervalMs: "1000",
                logFlushSchedulerIntervalMs: "1000",
                logPreallocate: true,
                logRetentionBytes: "1073741824",
                logRetentionHours: "168",
                logRetentionMinutes: "10080",
                logRetentionMs: "86400000",
                logSegmentBytes: "134217728",
                numPartitions: "10",
            },
            // Compute and storage for each broker host.
            resources: {
                diskSize: 128,
                diskTypeId: "network-ssd",
                resourcePresetId: "s2.medium",
            },
        },
        schemaRegistry: false,
        unmanagedTopics: false,
        version: "2.8",
        zones: [
            "ru-central1-a",
            "ru-central1-b",
            "ru-central1-c",
        ],
        // Compute and storage for each ZooKeeper host.
        zookeeper: {
            resources: {
                diskSize: 20,
                diskTypeId: "network-ssd",
                resourcePresetId: "s2.micro",
            },
        },
    },
    environment: "PRESTABLE",
    networkId: fooVpcNetwork.id,
    subnetIds: [
        fooVpcSubnet.id,
        bar.id,
        baz.id,
    ],
    // Two accounts: a producer for topic "input", and a worker that
    // consumes "input" and produces to "output".
    users: [
        {
            name: "producer-application",
            password: "password",
            permissions: [{
                role: "ACCESS_ROLE_PRODUCER",
                topicName: "input",
            }],
        },
        {
            name: "worker",
            password: "password",
            permissions: [
                {
                    role: "ACCESS_ROLE_CONSUMER",
                    topicName: "input",
                },
                {
                    role: "ACCESS_ROLE_PRODUCER",
                    topicName: "output",
                },
            ],
        },
    ],
});
import pulumi
import pulumi_yandex as yandex

# Example program: a highly available Kafka cluster -- two brokers in each of
# three availability zones (6 brokers) plus a 3-host ZooKeeper subcluster.

# Network that will host the cluster.
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
# One subnet per availability zone so brokers can be spread across zones.
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.1.0.0/24"],
    zone="ru-central1-a")
bar = yandex.VpcSubnet("bar",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.2.0.0/24"],
    zone="ru-central1-b")
baz = yandex.VpcSubnet("baz",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.3.0.0/24"],
    zone="ru-central1-c")
# brokers_count = 2 per zone x 3 zones = 6 brokers; a multi-broker cluster
# also requires the ZooKeeper subcluster configured below.
foo_mdb_kafka_cluster = yandex.MdbKafkaCluster("fooMdbKafkaCluster",
    config=yandex.MdbKafkaClusterConfigArgs(
        assign_public_ip=True,
        brokers_count=2,
        kafka=yandex.MdbKafkaClusterConfigKafkaArgs(
            # Broker-level Kafka settings; the numeric settings are passed as strings.
            kafka_config=yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs(
                compression_type="COMPRESSION_TYPE_ZSTD",
                default_replication_factor="6",
                log_flush_interval_messages="1024",
                log_flush_interval_ms="1000",
                log_flush_scheduler_interval_ms="1000",
                log_preallocate=True,
                log_retention_bytes="1073741824",
                log_retention_hours="168",
                log_retention_minutes="10080",
                log_retention_ms="86400000",
                log_segment_bytes="134217728",
                num_partitions="10",
            ),
            # Compute and storage for each broker host.
            resources=yandex.MdbKafkaClusterConfigKafkaResourcesArgs(
                disk_size=128,
                disk_type_id="network-ssd",
                resource_preset_id="s2.medium",
            ),
        ),
        schema_registry=False,
        unmanaged_topics=False,
        version="2.8",
        zones=[
            "ru-central1-a",
            "ru-central1-b",
            "ru-central1-c",
        ],
        # Compute and storage for each ZooKeeper host.
        zookeeper=yandex.MdbKafkaClusterConfigZookeeperArgs(
            resources=yandex.MdbKafkaClusterConfigZookeeperResourcesArgs(
                disk_size=20,
                disk_type_id="network-ssd",
                resource_preset_id="s2.micro",
            ),
        ),
    ),
    environment="PRESTABLE",
    network_id=foo_vpc_network.id,
    subnet_ids=[
        foo_vpc_subnet.id,
        bar.id,
        baz.id,
    ],
    # Two accounts: a producer for topic "input", and a worker that
    # consumes "input" and produces to "output".
    users=[
        yandex.MdbKafkaClusterUserArgs(
            name="producer-application",
            password="password",
            permissions=[yandex.MdbKafkaClusterUserPermissionArgs(
                role="ACCESS_ROLE_PRODUCER",
                topic_name="input",
            )],
        ),
        yandex.MdbKafkaClusterUserArgs(
            name="worker",
            password="password",
            permissions=[
                yandex.MdbKafkaClusterUserPermissionArgs(
                    role="ACCESS_ROLE_CONSUMER",
                    topic_name="input",
                ),
                yandex.MdbKafkaClusterUserPermissionArgs(
                    role="ACCESS_ROLE_PRODUCER",
                    topic_name="output",
                ),
            ],
        ),
    ])
Coming soon!
Create MdbKafkaCluster Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new MdbKafkaCluster(name: string, args: MdbKafkaClusterArgs, opts?: CustomResourceOptions);
@overload
def MdbKafkaCluster(resource_name: str,
                    args: MdbKafkaClusterArgs,
                    opts: Optional[ResourceOptions] = None)
@overload
def MdbKafkaCluster(resource_name: str,
                    opts: Optional[ResourceOptions] = None,
                    config: Optional[MdbKafkaClusterConfigArgs] = None,
                    network_id: Optional[str] = None,
                    environment: Optional[str] = None,
                    description: Optional[str] = None,
                    folder_id: Optional[str] = None,
                    host_group_ids: Optional[Sequence[str]] = None,
                    labels: Optional[Mapping[str, str]] = None,
                    maintenance_window: Optional[MdbKafkaClusterMaintenanceWindowArgs] = None,
                    name: Optional[str] = None,
                    deletion_protection: Optional[bool] = None,
                    security_group_ids: Optional[Sequence[str]] = None,
                    subnet_ids: Optional[Sequence[str]] = None,
                    topics: Optional[Sequence[MdbKafkaClusterTopicArgs]] = None,
                    users: Optional[Sequence[MdbKafkaClusterUserArgs]] = None)
func NewMdbKafkaCluster(ctx *Context, name string, args MdbKafkaClusterArgs, opts ...ResourceOption) (*MdbKafkaCluster, error)
public MdbKafkaCluster(string name, MdbKafkaClusterArgs args, CustomResourceOptions? opts = null)
public MdbKafkaCluster(String name, MdbKafkaClusterArgs args)
public MdbKafkaCluster(String name, MdbKafkaClusterArgs args, CustomResourceOptions options)
type: yandex:MdbKafkaCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MdbKafkaClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MdbKafkaClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MdbKafkaClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MdbKafkaClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MdbKafkaClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
// Reference example (C#): every input property of MdbKafkaCluster set to a
// placeholder value to show the full shape of the arguments.
var mdbKafkaClusterResource = new Yandex.MdbKafkaCluster("mdbKafkaClusterResource", new()
{
    Config = new Yandex.Inputs.MdbKafkaClusterConfigArgs
    {
        Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
        {
            Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
            {
                DiskSize = 0,
                DiskTypeId = "string",
                ResourcePresetId = "string",
            },
            KafkaConfig = new Yandex.Inputs.MdbKafkaClusterConfigKafkaKafkaConfigArgs
            {
                AutoCreateTopicsEnable = false,
                CompressionType = "string",
                DefaultReplicationFactor = "string",
                LogFlushIntervalMessages = "string",
                LogFlushIntervalMs = "string",
                LogFlushSchedulerIntervalMs = "string",
                LogPreallocate = false,
                LogRetentionBytes = "string",
                LogRetentionHours = "string",
                LogRetentionMinutes = "string",
                LogRetentionMs = "string",
                LogSegmentBytes = "string",
                NumPartitions = "string",
                SocketReceiveBufferBytes = "string",
                SocketSendBufferBytes = "string",
            },
        },
        Version = "string",
        Zones = new[]
        {
            "string",
        },
        AssignPublicIp = false,
        BrokersCount = 0,
        SchemaRegistry = false,
        UnmanagedTopics = false,
        Zookeeper = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperArgs
        {
            Resources = new Yandex.Inputs.MdbKafkaClusterConfigZookeeperResourcesArgs
            {
                DiskSize = 0,
                DiskTypeId = "string",
                ResourcePresetId = "string",
            },
        },
    },
    NetworkId = "string",
    Environment = "string",
    Description = "string",
    FolderId = "string",
    HostGroupIds = new[]
    {
        "string",
    },
    Labels = 
    {
        { "string", "string" },
    },
    MaintenanceWindow = new Yandex.Inputs.MdbKafkaClusterMaintenanceWindowArgs
    {
        Type = "string",
        Day = "string",
        Hour = 0,
    },
    Name = "string",
    DeletionProtection = false,
    SecurityGroupIds = new[]
    {
        "string",
    },
    SubnetIds = new[]
    {
        "string",
    },
    Users = new[]
    {
        new Yandex.Inputs.MdbKafkaClusterUserArgs
        {
            Name = "string",
            Password = "string",
            Permissions = new[]
            {
                new Yandex.Inputs.MdbKafkaClusterUserPermissionArgs
                {
                    Role = "string",
                    TopicName = "string",
                },
            },
        },
    },
});
// Reference example (Go): every input property of MdbKafkaCluster set to a
// placeholder value to show the full shape of the arguments. Note that all
// nested *Args composite literals are package-qualified (yandex.*).
example, err := yandex.NewMdbKafkaCluster(ctx, "mdbKafkaClusterResource", &yandex.MdbKafkaClusterArgs{
	Config: &yandex.MdbKafkaClusterConfigArgs{
		Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
			Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
				DiskSize:         pulumi.Int(0),
				DiskTypeId:       pulumi.String("string"),
				ResourcePresetId: pulumi.String("string"),
			},
			KafkaConfig: &yandex.MdbKafkaClusterConfigKafkaKafkaConfigArgs{
				AutoCreateTopicsEnable:      pulumi.Bool(false),
				CompressionType:             pulumi.String("string"),
				DefaultReplicationFactor:    pulumi.String("string"),
				LogFlushIntervalMessages:    pulumi.String("string"),
				LogFlushIntervalMs:          pulumi.String("string"),
				LogFlushSchedulerIntervalMs: pulumi.String("string"),
				LogPreallocate:              pulumi.Bool(false),
				LogRetentionBytes:           pulumi.String("string"),
				LogRetentionHours:           pulumi.String("string"),
				LogRetentionMinutes:         pulumi.String("string"),
				LogRetentionMs:              pulumi.String("string"),
				LogSegmentBytes:             pulumi.String("string"),
				NumPartitions:               pulumi.String("string"),
				SocketReceiveBufferBytes:    pulumi.String("string"),
				SocketSendBufferBytes:       pulumi.String("string"),
			},
		},
		Version: pulumi.String("string"),
		Zones: pulumi.StringArray{
			pulumi.String("string"),
		},
		AssignPublicIp:  pulumi.Bool(false),
		BrokersCount:    pulumi.Int(0),
		SchemaRegistry:  pulumi.Bool(false),
		UnmanagedTopics: pulumi.Bool(false),
		Zookeeper: &yandex.MdbKafkaClusterConfigZookeeperArgs{
			Resources: &yandex.MdbKafkaClusterConfigZookeeperResourcesArgs{
				DiskSize:         pulumi.Int(0),
				DiskTypeId:       pulumi.String("string"),
				ResourcePresetId: pulumi.String("string"),
			},
		},
	},
	NetworkId:   pulumi.String("string"),
	Environment: pulumi.String("string"),
	Description: pulumi.String("string"),
	FolderId:    pulumi.String("string"),
	HostGroupIds: pulumi.StringArray{
		pulumi.String("string"),
	},
	Labels: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	MaintenanceWindow: &yandex.MdbKafkaClusterMaintenanceWindowArgs{
		Type: pulumi.String("string"),
		Day:  pulumi.String("string"),
		Hour: pulumi.Int(0),
	},
	Name:               pulumi.String("string"),
	DeletionProtection: pulumi.Bool(false),
	SecurityGroupIds: pulumi.StringArray{
		pulumi.String("string"),
	},
	SubnetIds: pulumi.StringArray{
		pulumi.String("string"),
	},
	Users: yandex.MdbKafkaClusterUserArray{
		&yandex.MdbKafkaClusterUserArgs{
			Name:     pulumi.String("string"),
			Password: pulumi.String("string"),
			Permissions: yandex.MdbKafkaClusterUserPermissionArray{
				&yandex.MdbKafkaClusterUserPermissionArgs{
					Role:      pulumi.String("string"),
					TopicName: pulumi.String("string"),
				},
			},
		},
	},
})
var mdbKafkaClusterResource = new MdbKafkaCluster("mdbKafkaClusterResource", MdbKafkaClusterArgs.builder()
    .config(MdbKafkaClusterConfigArgs.builder()
        .kafka(MdbKafkaClusterConfigKafkaArgs.builder()
            .resources(MdbKafkaClusterConfigKafkaResourcesArgs.builder()
                .diskSize(0)
                .diskTypeId("string")
                .resourcePresetId("string")
                .build())
            .kafkaConfig(MdbKafkaClusterConfigKafkaKafkaConfigArgs.builder()
                .autoCreateTopicsEnable(false)
                .compressionType("string")
                .defaultReplicationFactor("string")
                .logFlushIntervalMessages("string")
                .logFlushIntervalMs("string")
                .logFlushSchedulerIntervalMs("string")
                .logPreallocate(false)
                .logRetentionBytes("string")
                .logRetentionHours("string")
                .logRetentionMinutes("string")
                .logRetentionMs("string")
                .logSegmentBytes("string")
                .numPartitions("string")
                .socketReceiveBufferBytes("string")
                .socketSendBufferBytes("string")
                .build())
            .build())
        .version("string")
        .zones("string")
        .assignPublicIp(false)
        .brokersCount(0)
        .schemaRegistry(false)
        .unmanagedTopics(false)
        .zookeeper(MdbKafkaClusterConfigZookeeperArgs.builder()
            .resources(MdbKafkaClusterConfigZookeeperResourcesArgs.builder()
                .diskSize(0)
                .diskTypeId("string")
                .resourcePresetId("string")
                .build())
            .build())
        .build())
    .networkId("string")
    .environment("string")
    .description("string")
    .folderId("string")
    .hostGroupIds("string")
    .labels(Map.of("string", "string"))
    .maintenanceWindow(MdbKafkaClusterMaintenanceWindowArgs.builder()
        .type("string")
        .day("string")
        .hour(0)
        .build())
    .name("string")
    .deletionProtection(false)
    .securityGroupIds("string")
    .subnetIds("string")
    .users(MdbKafkaClusterUserArgs.builder()
        .name("string")
        .password("string")
        .permissions(MdbKafkaClusterUserPermissionArgs.builder()
            .role("string")
            .topicName("string")
            .build())
        .build())
    .build());
mdb_kafka_cluster_resource = yandex.MdbKafkaCluster("mdbKafkaClusterResource",
    config={
        "kafka": {
            "resources": {
                "disk_size": 0,
                "disk_type_id": "string",
                "resource_preset_id": "string",
            },
            "kafka_config": {
                "auto_create_topics_enable": False,
                "compression_type": "string",
                "default_replication_factor": "string",
                "log_flush_interval_messages": "string",
                "log_flush_interval_ms": "string",
                "log_flush_scheduler_interval_ms": "string",
                "log_preallocate": False,
                "log_retention_bytes": "string",
                "log_retention_hours": "string",
                "log_retention_minutes": "string",
                "log_retention_ms": "string",
                "log_segment_bytes": "string",
                "num_partitions": "string",
                "socket_receive_buffer_bytes": "string",
                "socket_send_buffer_bytes": "string",
            },
        },
        "version": "string",
        "zones": ["string"],
        "assign_public_ip": False,
        "brokers_count": 0,
        "schema_registry": False,
        "unmanaged_topics": False,
        "zookeeper": {
            "resources": {
                "disk_size": 0,
                "disk_type_id": "string",
                "resource_preset_id": "string",
            },
        },
    },
    network_id="string",
    environment="string",
    description="string",
    folder_id="string",
    host_group_ids=["string"],
    labels={
        "string": "string",
    },
    maintenance_window={
        "type": "string",
        "day": "string",
        "hour": 0,
    },
    name="string",
    deletion_protection=False,
    security_group_ids=["string"],
    subnet_ids=["string"],
    users=[{
        "name": "string",
        "password": "string",
        "permissions": [{
            "role": "string",
            "topic_name": "string",
        }],
    }])
const mdbKafkaClusterResource = new yandex.MdbKafkaCluster("mdbKafkaClusterResource", {
    config: {
        kafka: {
            resources: {
                diskSize: 0,
                diskTypeId: "string",
                resourcePresetId: "string",
            },
            kafkaConfig: {
                autoCreateTopicsEnable: false,
                compressionType: "string",
                defaultReplicationFactor: "string",
                logFlushIntervalMessages: "string",
                logFlushIntervalMs: "string",
                logFlushSchedulerIntervalMs: "string",
                logPreallocate: false,
                logRetentionBytes: "string",
                logRetentionHours: "string",
                logRetentionMinutes: "string",
                logRetentionMs: "string",
                logSegmentBytes: "string",
                numPartitions: "string",
                socketReceiveBufferBytes: "string",
                socketSendBufferBytes: "string",
            },
        },
        version: "string",
        zones: ["string"],
        assignPublicIp: false,
        brokersCount: 0,
        schemaRegistry: false,
        unmanagedTopics: false,
        zookeeper: {
            resources: {
                diskSize: 0,
                diskTypeId: "string",
                resourcePresetId: "string",
            },
        },
    },
    networkId: "string",
    environment: "string",
    description: "string",
    folderId: "string",
    hostGroupIds: ["string"],
    labels: {
        string: "string",
    },
    maintenanceWindow: {
        type: "string",
        day: "string",
        hour: 0,
    },
    name: "string",
    deletionProtection: false,
    securityGroupIds: ["string"],
    subnetIds: ["string"],
    users: [{
        name: "string",
        password: "string",
        permissions: [{
            role: "string",
            topicName: "string",
        }],
    }],
});
type: yandex:MdbKafkaCluster
properties:
    config:
        assignPublicIp: false
        brokersCount: 0
        kafka:
            kafkaConfig:
                autoCreateTopicsEnable: false
                compressionType: string
                defaultReplicationFactor: string
                logFlushIntervalMessages: string
                logFlushIntervalMs: string
                logFlushSchedulerIntervalMs: string
                logPreallocate: false
                logRetentionBytes: string
                logRetentionHours: string
                logRetentionMinutes: string
                logRetentionMs: string
                logSegmentBytes: string
                numPartitions: string
                socketReceiveBufferBytes: string
                socketSendBufferBytes: string
            resources:
                diskSize: 0
                diskTypeId: string
                resourcePresetId: string
        schemaRegistry: false
        unmanagedTopics: false
        version: string
        zones:
            - string
        zookeeper:
            resources:
                diskSize: 0
                diskTypeId: string
                resourcePresetId: string
    deletionProtection: false
    description: string
    environment: string
    folderId: string
    hostGroupIds:
        - string
    labels:
        string: string
    maintenanceWindow:
        day: string
        hour: 0
        type: string
    name: string
    networkId: string
    securityGroupIds:
        - string
    subnetIds:
        - string
    users:
        - name: string
          password: string
          permissions:
            - role: string
              topicName: string
MdbKafkaCluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The MdbKafkaCluster resource accepts the following input properties:
- Config
MdbKafka Cluster Config 
- Configuration of the Kafka cluster. The structure is documented below.
- NetworkId string
- ID of the network, to which the Kafka cluster belongs.
- DeletionProtection bool
- Inhibits deletion of the cluster. Can be either `true` or `false`.
- Description string
- Description of the Kafka cluster.
- Environment string
- Deployment environment of the Kafka cluster. Can be either `PRESTABLE` or `PRODUCTION`. The default is `PRODUCTION`.
- FolderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- HostGroupIds List<string>
- A list of IDs of the host groups to place VMs of the cluster on.
- Labels Dictionary<string, string>
- A set of key/value label pairs to assign to the Kafka cluster.
- MaintenanceWindow MdbKafka Cluster Maintenance Window 
- Maintenance policy of the Kafka cluster. The structure is documented below.
- Name string
- The name of the Kafka cluster.
- SecurityGroupIds List<string>
- Security group ids, to which the Kafka cluster belongs.
- SubnetIds List<string>
- IDs of the subnets, to which the Kafka cluster belongs.
- Topics
List<MdbKafka Cluster Topic> 
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- Users
List<MdbKafka Cluster User> 
- A user of the Kafka cluster. The structure is documented below.
- Config
MdbKafka Cluster Config Args 
- Configuration of the Kafka cluster. The structure is documented below.
- NetworkId string
- ID of the network, to which the Kafka cluster belongs.
- DeletionProtection bool
- Inhibits deletion of the cluster. Can be either trueorfalse.
- Description string
- Description of the Kafka cluster.
- Environment string
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- FolderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- HostGroupIds []string
- A list of IDs of the host groups to place VMs of the cluster on.
- Labels map[string]string
- A set of key/value label pairs to assign to the Kafka cluster.
- MaintenanceWindow MdbKafka Cluster Maintenance Window Args 
- Maintenance policy of the Kafka cluster. The structure is documented below.
- Name string
- The name of the topic.
- SecurityGroupIds []string
- Security group ids, to which the Kafka cluster belongs.
- SubnetIds []string
- IDs of the subnets, to which the Kafka cluster belongs.
- Topics
[]MdbKafka Cluster Topic Args 
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- Users
[]MdbKafka Cluster User Args 
- A user of the Kafka cluster. The structure is documented below.
- config
MdbKafka Cluster Config 
- Configuration of the Kafka cluster. The structure is documented below.
- networkId String
- ID of the network, to which the Kafka cluster belongs.
- deletionProtection Boolean
- Inhibits deletion of the cluster. Can be either trueorfalse.
- description String
- Description of the Kafka cluster.
- environment String
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- folderId String
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- hostGroup List<String>Ids 
- A list of IDs of the host groups to place VMs of the cluster on.
- labels Map<String,String>
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenanceWindow MdbKafka Cluster Maintenance Window 
- Maintenance policy of the Kafka cluster. The structure is documented below.
- name String
- The name of the topic.
- securityGroup List<String>Ids 
- Security group ids, to which the Kafka cluster belongs.
- subnetIds List<String>
- IDs of the subnets, to which the Kafka cluster belongs.
- topics
List<MdbKafka Cluster Topic> 
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- users
List<MdbKafka Cluster User> 
- A user of the Kafka cluster. The structure is documented below.
- config
MdbKafka Cluster Config 
- Configuration of the Kafka cluster. The structure is documented below.
- networkId string
- ID of the network, to which the Kafka cluster belongs.
- deletionProtection boolean
- Inhibits deletion of the cluster. Can be either trueorfalse.
- description string
- Description of the Kafka cluster.
- environment string
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- folderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- hostGroup string[]Ids 
- A list of IDs of the host groups to place VMs of the cluster on.
- labels {[key: string]: string}
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenanceWindow MdbKafka Cluster Maintenance Window 
- Maintenance policy of the Kafka cluster. The structure is documented below.
- name string
- The name of the topic.
- securityGroup string[]Ids 
- Security group ids, to which the Kafka cluster belongs.
- subnetIds string[]
- IDs of the subnets, to which the Kafka cluster belongs.
- topics
MdbKafka Cluster Topic[] 
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- users
MdbKafka Cluster User[] 
- A user of the Kafka cluster. The structure is documented below.
- config
MdbKafka Cluster Config Args 
- Configuration of the Kafka cluster. The structure is documented below.
- network_id str
- ID of the network, to which the Kafka cluster belongs.
- deletion_protection bool
- Inhibits deletion of the cluster. Can be either trueorfalse.
- description str
- Description of the Kafka cluster.
- environment str
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- folder_id str
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- host_group_ids Sequence[str]
- A list of IDs of the host groups to place VMs of the cluster on.
- labels Mapping[str, str]
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenance_window MdbKafka Cluster Maintenance Window Args 
- Maintenance policy of the Kafka cluster. The structure is documented below.
- name str
- The name of the topic.
- security_group_ids Sequence[str]
- Security group ids, to which the Kafka cluster belongs.
- subnet_ids Sequence[str]
- IDs of the subnets, to which the Kafka cluster belongs.
- topics
Sequence[MdbKafka Cluster Topic Args] 
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- users
Sequence[MdbKafka Cluster User Args] 
- A user of the Kafka cluster. The structure is documented below.
- config Property Map
- Configuration of the Kafka cluster. The structure is documented below.
- networkId String
- ID of the network, to which the Kafka cluster belongs.
- deletionProtection Boolean
- Inhibits deletion of the cluster. Can be either trueorfalse.
- description String
- Description of the Kafka cluster.
- environment String
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- folderId String
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- hostGroup List<String>Ids 
- A list of IDs of the host groups to place VMs of the cluster on.
- labels Map<String>
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenanceWindow Property Map
- Maintenance policy of the Kafka cluster. The structure is documented below.
- name String
- The name of the topic.
- securityGroup List<String>Ids 
- Security group ids, to which the Kafka cluster belongs.
- subnetIds List<String>
- IDs of the subnets, to which the Kafka cluster belongs.
- topics List<Property Map>
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- users List<Property Map>
- A user of the Kafka cluster. The structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the MdbKafkaCluster resource produces the following output properties:
- CreatedAt string
- Timestamp of cluster creation.
- Health string
- Health of the host.
- Hosts
List<MdbKafka Cluster Host> 
- A host of the Kafka cluster. The structure is documented below.
- Id string
- The provider-assigned unique ID for this managed resource.
- Status string
- Status of the cluster. Can be either `CREATING`, `STARTING`, `RUNNING`, `UPDATING`, `STOPPING`, `STOPPED`, `ERROR` or `STATUS_UNKNOWN`. For more information see the `status` field of the JSON representation in the official documentation.
- CreatedAt string
- Timestamp of cluster creation.
- Health string
- Health of the host.
- Hosts
[]MdbKafka Cluster Host 
- A host of the Kafka cluster. The structure is documented below.
- Id string
- The provider-assigned unique ID for this managed resource.
- Status string
- Status of the cluster. Can be either CREATING,STARTING,RUNNING,UPDATING,STOPPING,STOPPED,ERRORorSTATUS_UNKNOWN. For more information seestatusfield of JSON representation in the official documentation.
- createdAt String
- Timestamp of cluster creation.
- health String
- Health of the host.
- hosts
List<MdbKafka Cluster Host> 
- A host of the Kafka cluster. The structure is documented below.
- id String
- The provider-assigned unique ID for this managed resource.
- status String
- Status of the cluster. Can be either CREATING,STARTING,RUNNING,UPDATING,STOPPING,STOPPED,ERRORorSTATUS_UNKNOWN. For more information seestatusfield of JSON representation in the official documentation.
- createdAt string
- Timestamp of cluster creation.
- health string
- Health of the host.
- hosts
MdbKafka Cluster Host[] 
- A host of the Kafka cluster. The structure is documented below.
- id string
- The provider-assigned unique ID for this managed resource.
- status string
- Status of the cluster. Can be either CREATING,STARTING,RUNNING,UPDATING,STOPPING,STOPPED,ERRORorSTATUS_UNKNOWN. For more information seestatusfield of JSON representation in the official documentation.
- created_at str
- Timestamp of cluster creation.
- health str
- Health of the host.
- hosts
Sequence[MdbKafka Cluster Host] 
- A host of the Kafka cluster. The structure is documented below.
- id str
- The provider-assigned unique ID for this managed resource.
- status str
- Status of the cluster. Can be either CREATING,STARTING,RUNNING,UPDATING,STOPPING,STOPPED,ERRORorSTATUS_UNKNOWN. For more information seestatusfield of JSON representation in the official documentation.
- createdAt String
- Timestamp of cluster creation.
- health String
- Health of the host.
- hosts List<Property Map>
- A host of the Kafka cluster. The structure is documented below.
- id String
- The provider-assigned unique ID for this managed resource.
- status String
- Status of the cluster. Can be either CREATING,STARTING,RUNNING,UPDATING,STOPPING,STOPPED,ERRORorSTATUS_UNKNOWN. For more information seestatusfield of JSON representation in the official documentation.
Look up Existing MdbKafkaCluster Resource
Get an existing MdbKafkaCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: MdbKafkaClusterState, opts?: CustomResourceOptions): MdbKafkaCluster
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        config: Optional[MdbKafkaClusterConfigArgs] = None,
        created_at: Optional[str] = None,
        deletion_protection: Optional[bool] = None,
        description: Optional[str] = None,
        environment: Optional[str] = None,
        folder_id: Optional[str] = None,
        health: Optional[str] = None,
        host_group_ids: Optional[Sequence[str]] = None,
        hosts: Optional[Sequence[MdbKafkaClusterHostArgs]] = None,
        labels: Optional[Mapping[str, str]] = None,
        maintenance_window: Optional[MdbKafkaClusterMaintenanceWindowArgs] = None,
        name: Optional[str] = None,
        network_id: Optional[str] = None,
        security_group_ids: Optional[Sequence[str]] = None,
        status: Optional[str] = None,
        subnet_ids: Optional[Sequence[str]] = None,
        topics: Optional[Sequence[MdbKafkaClusterTopicArgs]] = None,
        users: Optional[Sequence[MdbKafkaClusterUserArgs]] = None) -> MdbKafkaCluster
func GetMdbKafkaCluster(ctx *Context, name string, id IDInput, state *MdbKafkaClusterState, opts ...ResourceOption) (*MdbKafkaCluster, error)
public static MdbKafkaCluster Get(string name, Input<string> id, MdbKafkaClusterState? state, CustomResourceOptions? opts = null)
public static MdbKafkaCluster get(String name, Output<String> id, MdbKafkaClusterState state, CustomResourceOptions options)
resources:
  _:
    type: yandex:MdbKafkaCluster
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- Config
MdbKafka Cluster Config 
- Configuration of the Kafka cluster. The structure is documented below.
- CreatedAt string
- Timestamp of cluster creation.
- DeletionProtection bool
- Inhibits deletion of the cluster. Can be either trueorfalse.
- Description string
- Description of the Kafka cluster.
- Environment string
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- FolderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- Health string
- Health of the host.
- HostGroup List<string>Ids 
- A list of IDs of the host groups to place VMs of the cluster on.
- Hosts
List<MdbKafka Cluster Host> 
- A host of the Kafka cluster. The structure is documented below.
- Labels Dictionary<string, string>
- A set of key/value label pairs to assign to the Kafka cluster.
- MaintenanceWindow MdbKafka Cluster Maintenance Window 
- Maintenance policy of the Kafka cluster. The structure is documented below.
- Name string
- The name of the topic.
- NetworkId string
- ID of the network, to which the Kafka cluster belongs.
- SecurityGroup List<string>Ids 
- Security group ids, to which the Kafka cluster belongs.
- Status string
- Status of the cluster. Can be either CREATING,STARTING,RUNNING,UPDATING,STOPPING,STOPPED,ERRORorSTATUS_UNKNOWN. For more information seestatusfield of JSON representation in the official documentation.
- SubnetIds List<string>
- IDs of the subnets, to which the Kafka cluster belongs.
- Topics
List<MdbKafka Cluster Topic> 
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- Users
List<MdbKafka Cluster User> 
- A user of the Kafka cluster. The structure is documented below.
- Config
MdbKafka Cluster Config Args 
- Configuration of the Kafka cluster. The structure is documented below.
- CreatedAt string
- Timestamp of cluster creation.
- DeletionProtection bool
- Inhibits deletion of the cluster. Can be either trueorfalse.
- Description string
- Description of the Kafka cluster.
- Environment string
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- FolderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- Health string
- Health of the host.
- HostGroup []stringIds 
- A list of IDs of the host groups to place VMs of the cluster on.
- Hosts
[]MdbKafka Cluster Host Args 
- A host of the Kafka cluster. The structure is documented below.
- Labels map[string]string
- A set of key/value label pairs to assign to the Kafka cluster.
- MaintenanceWindow MdbKafka Cluster Maintenance Window Args 
- Maintenance policy of the Kafka cluster. The structure is documented below.
- Name string
- The name of the topic.
- NetworkId string
- ID of the network, to which the Kafka cluster belongs.
- SecurityGroup []stringIds 
- Security group ids, to which the Kafka cluster belongs.
- Status string
- Status of the cluster. Can be either CREATING,STARTING,RUNNING,UPDATING,STOPPING,STOPPED,ERRORorSTATUS_UNKNOWN. For more information seestatusfield of JSON representation in the official documentation.
- SubnetIds []string
- IDs of the subnets, to which the Kafka cluster belongs.
- Topics
[]MdbKafka Cluster Topic Args 
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- Users
[]MdbKafka Cluster User Args 
- A user of the Kafka cluster. The structure is documented below.
- config
MdbKafka Cluster Config 
- Configuration of the Kafka cluster. The structure is documented below.
- createdAt String
- Timestamp of cluster creation.
- deletionProtection Boolean
- Inhibits deletion of the cluster. Can be either trueorfalse.
- description String
- Description of the Kafka cluster.
- environment String
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- folderId String
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- health String
- Health of the host.
- hostGroup List<String>Ids 
- A list of IDs of the host groups to place VMs of the cluster on.
- hosts
List<MdbKafka Cluster Host> 
- A host of the Kafka cluster. The structure is documented below.
- labels Map<String,String>
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenanceWindow MdbKafka Cluster Maintenance Window 
- Maintenance policy of the Kafka cluster. The structure is documented below.
- name String
- The name of the topic.
- networkId String
- ID of the network, to which the Kafka cluster belongs.
- securityGroup List<String>Ids 
- Security group ids, to which the Kafka cluster belongs.
- status String
- Status of the cluster. Can be either CREATING,STARTING,RUNNING,UPDATING,STOPPING,STOPPED,ERRORorSTATUS_UNKNOWN. For more information seestatusfield of JSON representation in the official documentation.
- subnetIds List<String>
- IDs of the subnets, to which the Kafka cluster belongs.
- topics
List<MdbKafka Cluster Topic> 
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- users
List<MdbKafka Cluster User> 
- A user of the Kafka cluster. The structure is documented below.
- config
MdbKafka Cluster Config 
- Configuration of the Kafka cluster. The structure is documented below.
- createdAt string
- Timestamp of cluster creation.
- deletionProtection boolean
- Inhibits deletion of the cluster. Can be either trueorfalse.
- description string
- Description of the Kafka cluster.
- environment string
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- folderId string
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- health string
- Health of the host.
- hostGroup string[]Ids 
- A list of IDs of the host groups to place VMs of the cluster on.
- hosts
MdbKafka Cluster Host[] 
- A host of the Kafka cluster. The structure is documented below.
- labels {[key: string]: string}
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenanceWindow MdbKafka Cluster Maintenance Window 
- Maintenance policy of the Kafka cluster. The structure is documented below.
- name string
- The name of the topic.
- networkId string
- ID of the network, to which the Kafka cluster belongs.
- securityGroup string[]Ids 
- Security group ids, to which the Kafka cluster belongs.
- status string
- Status of the cluster. Can be either CREATING,STARTING,RUNNING,UPDATING,STOPPING,STOPPED,ERRORorSTATUS_UNKNOWN. For more information seestatusfield of JSON representation in the official documentation.
- subnetIds string[]
- IDs of the subnets, to which the Kafka cluster belongs.
- topics
MdbKafka Cluster Topic[] 
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- users
MdbKafka Cluster User[] 
- A user of the Kafka cluster. The structure is documented below.
- config
MdbKafka Cluster Config Args 
- Configuration of the Kafka cluster. The structure is documented below.
- created_at str
- Timestamp of cluster creation.
- deletion_protection bool
- Inhibits deletion of the cluster. Can be either trueorfalse.
- description str
- Description of the Kafka cluster.
- environment str
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- folder_id str
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- health str
- Health of the host.
- host_group_ids Sequence[str]
- A list of IDs of the host groups to place VMs of the cluster on.
- hosts
Sequence[MdbKafka Cluster Host Args] 
- A host of the Kafka cluster. The structure is documented below.
- labels Mapping[str, str]
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenance_window MdbKafka Cluster Maintenance Window Args 
- Maintenance policy of the Kafka cluster. The structure is documented below.
- name str
- The name of the topic.
- network_id str
- ID of the network, to which the Kafka cluster belongs.
- security_group_ids Sequence[str]
- Security group ids, to which the Kafka cluster belongs.
- status str
- Status of the cluster. Can be either CREATING,STARTING,RUNNING,UPDATING,STOPPING,STOPPED,ERRORorSTATUS_UNKNOWN. For more information seestatusfield of JSON representation in the official documentation.
- subnet_ids Sequence[str]
- IDs of the subnets, to which the Kafka cluster belongs.
- topics
Sequence[MdbKafka Cluster Topic Args] 
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- users
Sequence[MdbKafka Cluster User Args] 
- A user of the Kafka cluster. The structure is documented below.
- config Property Map
- Configuration of the Kafka cluster. The structure is documented below.
- createdAt String
- Timestamp of cluster creation.
- deletionProtection Boolean
- Inhibits deletion of the cluster. Can be either trueorfalse.
- description String
- Description of the Kafka cluster.
- environment String
- Deployment environment of the Kafka cluster. Can be either PRESTABLEorPRODUCTION. The default isPRODUCTION.
- folderId String
- The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
- health String
- Health of the host.
- hostGroupIds List<String>
- A list of IDs of the host groups to place VMs of the cluster on.
- hosts List<Property Map>
- A host of the Kafka cluster. The structure is documented below.
- labels Map<String>
- A set of key/value label pairs to assign to the Kafka cluster.
- maintenanceWindow Property Map
- Maintenance policy of the Kafka cluster. The structure is documented below.
- name String
- The name of the topic.
- networkId String
- ID of the network, to which the Kafka cluster belongs.
- securityGroupIds List<String>
- Security group ids, to which the Kafka cluster belongs.
- status String
- Status of the cluster. Can be either CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see the status field of the JSON representation in the official documentation.
- subnetIds List<String>
- IDs of the subnets, to which the Kafka cluster belongs.
- topics List<Property Map>
- To manage topics, please switch to using a separate resource type yandex.MdbKafkaTopic.
- users List<Property Map>
- A user of the Kafka cluster. The structure is documented below.
Supporting Types
MdbKafkaClusterConfig, MdbKafkaClusterConfigArgs        
- Kafka
MdbKafka Cluster Config Kafka 
- Configuration of the Kafka subcluster. The structure is documented below.
- Version string
- Version of the Kafka server software.
- Zones List<string>
- List of availability zones.
- AssignPublicIp bool
- Determines whether each broker will be assigned a public IP address. The default is false.
- BrokersCount int
- Count of brokers per availability zone. The default is 1.
- SchemaRegistry bool
- Enables managed schema registry on cluster. The default is false.
- UnmanagedTopics bool
- Allows to use Kafka AdminAPI to manage topics. The default is false.
- Zookeeper
MdbKafka Cluster Config Zookeeper 
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- Kafka
MdbKafka Cluster Config Kafka 
- Configuration of the Kafka subcluster. The structure is documented below.
- Version string
- Version of the Kafka server software.
- Zones []string
- List of availability zones.
- AssignPublicIp bool
- Determines whether each broker will be assigned a public IP address. The default is false.
- BrokersCount int
- Count of brokers per availability zone. The default is 1.
- SchemaRegistry bool
- Enables managed schema registry on cluster. The default is false.
- UnmanagedTopics bool
- Allows to use Kafka AdminAPI to manage topics. The default is false.
- Zookeeper
MdbKafka Cluster Config Zookeeper 
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- kafka
MdbKafka Cluster Config Kafka 
- Configuration of the Kafka subcluster. The structure is documented below.
- version String
- Version of the Kafka server software.
- zones List<String>
- List of availability zones.
- assignPublicIp Boolean
- Determines whether each broker will be assigned a public IP address. The default is false.
- brokersCount Integer
- Count of brokers per availability zone. The default is 1.
- schemaRegistry Boolean
- Enables managed schema registry on cluster. The default is false.
- unmanagedTopics Boolean
- Allows to use Kafka AdminAPI to manage topics. The default is false.
- zookeeper
MdbKafka Cluster Config Zookeeper 
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- kafka
MdbKafka Cluster Config Kafka 
- Configuration of the Kafka subcluster. The structure is documented below.
- version string
- Version of the Kafka server software.
- zones string[]
- List of availability zones.
- assignPublicIp boolean
- Determines whether each broker will be assigned a public IP address. The default is false.
- brokersCount number
- Count of brokers per availability zone. The default is 1.
- schemaRegistry boolean
- Enables managed schema registry on cluster. The default is false.
- unmanagedTopics boolean
- Allows to use Kafka AdminAPI to manage topics. The default is false.
- zookeeper
MdbKafka Cluster Config Zookeeper 
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- kafka
MdbKafka Cluster Config Kafka 
- Configuration of the Kafka subcluster. The structure is documented below.
- version str
- Version of the Kafka server software.
- zones Sequence[str]
- List of availability zones.
- assign_public_ip bool
- Determines whether each broker will be assigned a public IP address. The default is false.
- brokers_count int
- Count of brokers per availability zone. The default is 1.
- schema_registry bool
- Enables managed schema registry on cluster. The default is false.
- unmanaged_topics bool
- Allows to use Kafka AdminAPI to manage topics. The default is false.
- zookeeper
MdbKafka Cluster Config Zookeeper 
- Configuration of the ZooKeeper subcluster. The structure is documented below.
- kafka Property Map
- Configuration of the Kafka subcluster. The structure is documented below.
- version String
- Version of the Kafka server software.
- zones List<String>
- List of availability zones.
- assignPublicIp Boolean
- Determines whether each broker will be assigned a public IP address. The default is false.
- brokersCount Number
- Count of brokers per availability zone. The default is 1.
- schemaRegistry Boolean
- Enables managed schema registry on cluster. The default is false.
- unmanagedTopics Boolean
- Allows to use Kafka AdminAPI to manage topics. The default is false.
- zookeeper Property Map
- Configuration of the ZooKeeper subcluster. The structure is documented below.
MdbKafkaClusterConfigKafka, MdbKafkaClusterConfigKafkaArgs          
- Resources
MdbKafka Cluster Config Kafka Resources 
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- KafkaConfig MdbKafka Cluster Config Kafka Kafka Config 
- User-defined settings for the Kafka cluster. The structure is documented below.
- Resources
MdbKafka Cluster Config Kafka Resources 
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- KafkaConfig MdbKafka Cluster Config Kafka Kafka Config 
- User-defined settings for the Kafka cluster. The structure is documented below.
- resources
MdbKafka Cluster Config Kafka Resources 
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- kafkaConfig MdbKafka Cluster Config Kafka Kafka Config 
- User-defined settings for the Kafka cluster. The structure is documented below.
- resources
MdbKafka Cluster Config Kafka Resources 
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- kafkaConfig MdbKafka Cluster Config Kafka Kafka Config 
- User-defined settings for the Kafka cluster. The structure is documented below.
- resources
MdbKafka Cluster Config Kafka Resources 
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- kafka_config MdbKafka Cluster Config Kafka Kafka Config 
- User-defined settings for the Kafka cluster. The structure is documented below.
- resources Property Map
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- kafkaConfig Property Map
- User-defined settings for the Kafka cluster. The structure is documented below.
MdbKafkaClusterConfigKafkaKafkaConfig, MdbKafkaClusterConfigKafkaKafkaConfigArgs              
- AutoCreateTopicsEnable bool
- CompressionType string
- DefaultReplicationFactor string
- LogFlushIntervalMessages string
- LogFlushIntervalMs string
- LogFlushSchedulerIntervalMs string
- LogPreallocate bool
- LogRetentionBytes string
- LogRetentionHours string
- LogRetentionMinutes string
- LogRetentionMs string
- LogSegmentBytes string
- NumPartitions string
- SocketReceiveBufferBytes string
- SocketSendBufferBytes string
- AutoCreateTopicsEnable bool
- CompressionType string
- DefaultReplicationFactor string
- LogFlushIntervalMessages string
- LogFlushIntervalMs string
- LogFlushSchedulerIntervalMs string
- LogPreallocate bool
- LogRetentionBytes string
- LogRetentionHours string
- LogRetentionMinutes string
- LogRetentionMs string
- LogSegmentBytes string
- NumPartitions string
- SocketReceiveBufferBytes string
- SocketSendBufferBytes string
- autoCreateTopicsEnable Boolean
- compressionType String
- defaultReplicationFactor String
- logFlushIntervalMessages String
- logFlushIntervalMs String
- logFlushSchedulerIntervalMs String
- logPreallocate Boolean
- logRetentionBytes String
- logRetentionHours String
- logRetentionMinutes String
- logRetentionMs String
- logSegmentBytes String
- numPartitions String
- socketReceiveBufferBytes String
- socketSendBufferBytes String
- autoCreateTopicsEnable boolean
- compressionType string
- defaultReplicationFactor string
- logFlushIntervalMessages string
- logFlushIntervalMs string
- logFlushSchedulerIntervalMs string
- logPreallocate boolean
- logRetentionBytes string
- logRetentionHours string
- logRetentionMinutes string
- logRetentionMs string
- logSegmentBytes string
- numPartitions string
- socketReceiveBufferBytes string
- socketSendBufferBytes string
- auto_create_topics_enable bool
- compression_type str
- default_replication_factor str
- log_flush_interval_messages str
- log_flush_interval_ms str
- log_flush_scheduler_interval_ms str
- log_preallocate bool
- log_retention_bytes str
- log_retention_hours str
- log_retention_minutes str
- log_retention_ms str
- log_segment_bytes str
- num_partitions str
- socket_receive_buffer_bytes str
- socket_send_buffer_bytes str
- autoCreateTopicsEnable Boolean
- compressionType String
- defaultReplicationFactor String
- logFlushIntervalMessages String
- logFlushIntervalMs String
- logFlushSchedulerIntervalMs String
- logPreallocate Boolean
- logRetentionBytes String
- logRetentionHours String
- logRetentionMinutes String
- logRetentionMs String
- logSegmentBytes String
- numPartitions String
- socketReceiveBufferBytes String
- socketSendBufferBytes String
MdbKafkaClusterConfigKafkaResources, MdbKafkaClusterConfigKafkaResourcesArgs            
- DiskSize int
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- DiskTypeId string
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- ResourcePresetId string
- DiskSize int
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- DiskTypeId string
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- ResourcePresetId string
- diskSize Integer
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId String
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId String
- diskSize number
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId string
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId string
- disk_size int
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- disk_type_id str
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resource_preset_id str
- diskSize Number
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId String
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId String
MdbKafkaClusterConfigZookeeper, MdbKafkaClusterConfigZookeeperArgs          
- Resources
MdbKafka Cluster Config Zookeeper Resources 
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- Resources
MdbKafka Cluster Config Zookeeper Resources 
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources
MdbKafka Cluster Config Zookeeper Resources 
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources
MdbKafka Cluster Config Zookeeper Resources 
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources
MdbKafka Cluster Config Zookeeper Resources 
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
- resources Property Map
- Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
MdbKafkaClusterConfigZookeeperResources, MdbKafkaClusterConfigZookeeperResourcesArgs            
- DiskSize int
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- DiskTypeId string
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- ResourcePresetId string
- DiskSize int
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- DiskTypeId string
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- ResourcePresetId string
- diskSize Integer
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId String
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId String
- diskSize number
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId string
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId string
- disk_size int
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- disk_type_id str
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resource_preset_id str
- diskSize Number
- Volume of the storage available to a ZooKeeper host, in gigabytes.
- diskTypeId String
- Type of the storage of ZooKeeper hosts. For more information see the official documentation.
- resourcePresetId String
MdbKafkaClusterHost, MdbKafkaClusterHostArgs        
- AssignPublicIp bool
- Determines whether each broker will be assigned a public IP address. The default is false.
- Health string
- Health of the host.
- Name string
- The name of the topic.
- Role string
- The role type to grant to the topic.
- SubnetId string
- The ID of the subnet, to which the host belongs.
- ZoneId string
- The availability zone where the Kafka host was created.
- AssignPublicIp bool
- Determines whether each broker will be assigned a public IP address. The default is false.
- Health string
- Health of the host.
- Name string
- The name of the topic.
- Role string
- The role type to grant to the topic.
- SubnetId string
- The ID of the subnet, to which the host belongs.
- ZoneId string
- The availability zone where the Kafka host was created.
- assignPublicIp Boolean
- Determines whether each broker will be assigned a public IP address. The default is false.
- health String
- Health of the host.
- name String
- The name of the topic.
- role String
- The role type to grant to the topic.
- subnetId String
- The ID of the subnet, to which the host belongs.
- zoneId String
- The availability zone where the Kafka host was created.
- assignPublicIp boolean
- Determines whether each broker will be assigned a public IP address. The default is false.
- health string
- Health of the host.
- name string
- The name of the topic.
- role string
- The role type to grant to the topic.
- subnetId string
- The ID of the subnet, to which the host belongs.
- zoneId string
- The availability zone where the Kafka host was created.
- assign_public_ip bool
- Determines whether each broker will be assigned a public IP address. The default is false.
- health str
- Health of the host.
- name str
- The name of the topic.
- role str
- The role type to grant to the topic.
- subnet_id str
- The ID of the subnet, to which the host belongs.
- zone_id str
- The availability zone where the Kafka host was created.
- assignPublicIp Boolean
- Determines whether each broker will be assigned a public IP address. The default is false.
- health String
- Health of the host.
- name String
- The name of the topic.
- role String
- The role type to grant to the topic.
- subnetId String
- The ID of the subnet, to which the host belongs.
- zoneId String
- The availability zone where the Kafka host was created.
MdbKafkaClusterMaintenanceWindow, MdbKafkaClusterMaintenanceWindowArgs          
- Type string
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
- Day string
- Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
- Hour int
- Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
- Type string
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
- Day string
- Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
- Hour int
- Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
- type String
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
- day String
- Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
- hour Integer
- Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
- type string
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
- day string
- Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
- hour number
- Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
- type str
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
- day str
- Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
- hour int
- Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
- type String
- Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
- day String
- Day of the week (in DDD format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
- hour Number
- Hour of the day in UTC (in HH format). Allowed value is between 1 and 24.
MdbKafkaClusterTopic, MdbKafkaClusterTopicArgs        
- Name string
- The name of the topic.
- Partitions int
- The number of the topic's partitions.
- ReplicationFactor int
- Amount of data copies (replicas) for the topic in the cluster.
- TopicConfig MdbKafka Cluster Topic Topic Config 
- User-defined settings for the topic. The structure is documented below.
- Name string
- The name of the topic.
- Partitions int
- The number of the topic's partitions.
- ReplicationFactor int
- Amount of data copies (replicas) for the topic in the cluster.
- TopicConfig MdbKafka Cluster Topic Topic Config 
- User-defined settings for the topic. The structure is documented below.
- name String
- The name of the topic.
- partitions Integer
- The number of the topic's partitions.
- replicationFactor Integer
- Amount of data copies (replicas) for the topic in the cluster.
- topicConfig MdbKafka Cluster Topic Topic Config 
- User-defined settings for the topic. The structure is documented below.
- name string
- The name of the topic.
- partitions number
- The number of the topic's partitions.
- replicationFactor number
- Amount of data copies (replicas) for the topic in the cluster.
- topicConfig MdbKafka Cluster Topic Topic Config 
- User-defined settings for the topic. The structure is documented below.
- name str
- The name of the topic.
- partitions int
- The number of the topic's partitions.
- replication_factor int
- Amount of data copies (replicas) for the topic in the cluster.
- topic_config MdbKafka Cluster Topic Topic Config 
- User-defined settings for the topic. The structure is documented below.
- name String
- The name of the topic.
- partitions Number
- The number of the topic's partitions.
- replicationFactor Number
- Amount of data copies (replicas) for the topic in the cluster.
- topicConfig Property Map
- User-defined settings for the topic. The structure is documented below.
MdbKafkaClusterTopicTopicConfig, MdbKafkaClusterTopicTopicConfigArgs            
- CleanupPolicy string
- CompressionType string
- DeleteRetentionMs string
- FileDeleteDelayMs string
- FlushMessages string
- FlushMs string
- MaxMessageBytes string
- MinCompactionLagMs string
- MinInsyncReplicas string
- Preallocate bool
- RetentionBytes string
- RetentionMs string
- SegmentBytes string
- CleanupPolicy string
- CompressionType string
- DeleteRetentionMs string
- FileDeleteDelayMs string
- FlushMessages string
- FlushMs string
- MaxMessageBytes string
- MinCompactionLagMs string
- MinInsyncReplicas string
- Preallocate bool
- RetentionBytes string
- RetentionMs string
- SegmentBytes string
- cleanupPolicy String
- compressionType String
- deleteRetentionMs String
- fileDeleteDelayMs String
- flushMessages String
- flushMs String
- maxMessageBytes String
- minCompactionLagMs String
- minInsyncReplicas String
- preallocate Boolean
- retentionBytes String
- retentionMs String
- segmentBytes String
- cleanupPolicy string
- compressionType string
- deleteRetentionMs string
- fileDeleteDelayMs string
- flushMessages string
- flushMs string
- maxMessageBytes string
- minCompactionLagMs string
- minInsyncReplicas string
- preallocate boolean
- retentionBytes string
- retentionMs string
- segmentBytes string
- cleanup_policy str
- compression_type str
- delete_retention_ms str
- file_delete_delay_ms str
- flush_messages str
- flush_ms str
- max_message_bytes str
- min_compaction_lag_ms str
- min_insync_replicas str
- preallocate bool
- retention_bytes str
- retention_ms str
- segment_bytes str
- cleanupPolicy String
- compressionType String
- deleteRetentionMs String
- fileDeleteDelayMs String
- flushMessages String
- flushMs String
- maxMessageBytes String
- minCompactionLagMs String
- minInsyncReplicas String
- preallocate Boolean
- retentionBytes String
- retentionMs String
- segmentBytes String
MdbKafkaClusterUser, MdbKafkaClusterUserArgs        
- Name string
- The name of the topic.
- Password string
- The password of the user.
- Permissions
List<MdbKafka Cluster User Permission> 
- Set of permissions granted to the user. The structure is documented below.
- Name string
- The name of the topic.
- Password string
- The password of the user.
- Permissions
[]MdbKafka Cluster User Permission 
- Set of permissions granted to the user. The structure is documented below.
- name String
- The name of the topic.
- password String
- The password of the user.
- permissions
List<MdbKafka Cluster User Permission> 
- Set of permissions granted to the user. The structure is documented below.
- name string
- The name of the topic.
- password string
- The password of the user.
- permissions
MdbKafka Cluster User Permission[] 
- Set of permissions granted to the user. The structure is documented below.
- name str
- The name of the topic.
- password str
- The password of the user.
- permissions
Sequence[MdbKafka Cluster User Permission] 
- Set of permissions granted to the user. The structure is documented below.
- name String
- The name of the topic.
- password String
- The password of the user.
- permissions List<Property Map>
- Set of permissions granted to the user. The structure is documented below.
MdbKafkaClusterUserPermission, MdbKafkaClusterUserPermissionArgs          
- role str
- The role type to grant to the topic.
- topic_name str
- The name of the topic that the permission grants access to.
Import
A cluster can be imported using the id of the resource, e.g.
 $ pulumi import yandex:index/mdbKafkaCluster:MdbKafkaCluster foo cluster_id
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Yandex pulumi/pulumi-yandex
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the yandex Terraform Provider.