yandex.MdbKafkaTopic
Manages a topic of a Kafka cluster within Yandex.Cloud. For more information, see the official documentation.
Example Usage
using Pulumi;
using Yandex = Pulumi.Yandex;
class MyStack : Stack
{
    public MyStack()
    {
        var foo = new Yandex.MdbKafkaCluster("foo", new Yandex.MdbKafkaClusterArgs
        {
            NetworkId = "c64vs98keiqc7f24pvkd",
            Config = new Yandex.Inputs.MdbKafkaClusterConfigArgs
            {
                Version = "2.8",
                Zones = 
                {
                    "ru-central1-a",
                },
                UnmanagedTopics = true,
                Kafka = new Yandex.Inputs.MdbKafkaClusterConfigKafkaArgs
                {
                    Resources = new Yandex.Inputs.MdbKafkaClusterConfigKafkaResourcesArgs
                    {
                        ResourcePresetId = "s2.micro",
                        DiskTypeId = "network-hdd",
                        DiskSize = 16,
                    },
                },
            },
        });
        var events = new Yandex.MdbKafkaTopic("events", new Yandex.MdbKafkaTopicArgs
        {
            ClusterId = foo.Id,
            Partitions = 4,
            ReplicationFactor = 1,
            TopicConfig = new Yandex.Inputs.MdbKafkaTopicTopicConfigArgs
            {
                CleanupPolicy = "CLEANUP_POLICY_COMPACT",
                CompressionType = "COMPRESSION_TYPE_LZ4",
                DeleteRetentionMs = "86400000",
                FileDeleteDelayMs = "60000",
                FlushMessages = "128",
                FlushMs = "1000",
                MinCompactionLagMs = "0",
                RetentionBytes = "10737418240",
                RetentionMs = "604800000",
                MaxMessageBytes = "1048588",
                MinInsyncReplicas = "1",
                SegmentBytes = "268435456",
                Preallocate = true,
            },
        });
    }
}
package main
import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		foo, err := yandex.NewMdbKafkaCluster(ctx, "foo", &yandex.MdbKafkaClusterArgs{
			NetworkId: pulumi.String("c64vs98keiqc7f24pvkd"),
			Config: &yandex.MdbKafkaClusterConfigArgs{
				Version: pulumi.String("2.8"),
				Zones: pulumi.StringArray{
					pulumi.String("ru-central1-a"),
				},
				UnmanagedTopics: pulumi.Bool(true),
				Kafka: &yandex.MdbKafkaClusterConfigKafkaArgs{
					Resources: &yandex.MdbKafkaClusterConfigKafkaResourcesArgs{
						ResourcePresetId: pulumi.String("s2.micro"),
						DiskTypeId:       pulumi.String("network-hdd"),
						DiskSize:         pulumi.Int(16),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbKafkaTopic(ctx, "events", &yandex.MdbKafkaTopicArgs{
			ClusterId:         foo.ID(),
			Partitions:        pulumi.Int(4),
			ReplicationFactor: pulumi.Int(1),
			TopicConfig: &yandex.MdbKafkaTopicTopicConfigArgs{
				CleanupPolicy:      pulumi.String("CLEANUP_POLICY_COMPACT"),
				CompressionType:    pulumi.String("COMPRESSION_TYPE_LZ4"),
				DeleteRetentionMs:  pulumi.String("86400000"),
				FileDeleteDelayMs:  pulumi.String("60000"),
				FlushMessages:      pulumi.String("128"),
				FlushMs:            pulumi.String("1000"),
				MinCompactionLagMs: pulumi.String("0"),
				RetentionBytes:     pulumi.String("10737418240"),
				RetentionMs:        pulumi.String("604800000"),
				MaxMessageBytes:    pulumi.String("1048588"),
				MinInsyncReplicas:  pulumi.String("1"),
				SegmentBytes:       pulumi.String("268435456"),
				Preallocate:        pulumi.Bool(true),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";
const foo = new yandex.MdbKafkaCluster("foo", {
    networkId: "c64vs98keiqc7f24pvkd",
    config: {
        version: "2.8",
        zones: ["ru-central1-a"],
        unmanagedTopics: true,
        kafka: {
            resources: {
                resourcePresetId: "s2.micro",
                diskTypeId: "network-hdd",
                diskSize: 16,
            },
        },
    },
});
const events = new yandex.MdbKafkaTopic("events", {
    clusterId: foo.id,
    partitions: 4,
    replicationFactor: 1,
    topicConfig: {
        cleanupPolicy: "CLEANUP_POLICY_COMPACT",
        compressionType: "COMPRESSION_TYPE_LZ4",
        deleteRetentionMs: "86400000",
        fileDeleteDelayMs: "60000",
        flushMessages: "128",
        flushMs: "1000",
        minCompactionLagMs: "0",
        retentionBytes: "10737418240",
        retentionMs: "604800000",
        maxMessageBytes: "1048588",
        minInsyncReplicas: "1",
        segmentBytes: "268435456",
        preallocate: true,
    },
});
import pulumi
import pulumi_yandex as yandex
foo = yandex.MdbKafkaCluster("foo",
    network_id="c64vs98keiqc7f24pvkd",
    config=yandex.MdbKafkaClusterConfigArgs(
        version="2.8",
        zones=["ru-central1-a"],
        unmanaged_topics=True,
        kafka=yandex.MdbKafkaClusterConfigKafkaArgs(
            resources=yandex.MdbKafkaClusterConfigKafkaResourcesArgs(
                resource_preset_id="s2.micro",
                disk_type_id="network-hdd",
                disk_size=16,
            ),
        ),
    ))
events = yandex.MdbKafkaTopic("events",
    cluster_id=foo.id,
    partitions=4,
    replication_factor=1,
    topic_config=yandex.MdbKafkaTopicTopicConfigArgs(
        cleanup_policy="CLEANUP_POLICY_COMPACT",
        compression_type="COMPRESSION_TYPE_LZ4",
        delete_retention_ms="86400000",
        file_delete_delay_ms="60000",
        flush_messages="128",
        flush_ms="1000",
        min_compaction_lag_ms="0",
        retention_bytes="10737418240",
        retention_ms="604800000",
        max_message_bytes="1048588",
        min_insync_replicas="1",
        segment_bytes="268435456",
        preallocate=True,
    ))
Create MdbKafkaTopic Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new MdbKafkaTopic(name: string, args: MdbKafkaTopicArgs, opts?: CustomResourceOptions);
@overload
def MdbKafkaTopic(resource_name: str,
                  args: MdbKafkaTopicArgs,
                  opts: Optional[ResourceOptions] = None)
@overload
def MdbKafkaTopic(resource_name: str,
                  opts: Optional[ResourceOptions] = None,
                  cluster_id: Optional[str] = None,
                  partitions: Optional[int] = None,
                  replication_factor: Optional[int] = None,
                  name: Optional[str] = None,
                  topic_config: Optional[MdbKafkaTopicTopicConfigArgs] = None)
func NewMdbKafkaTopic(ctx *Context, name string, args MdbKafkaTopicArgs, opts ...ResourceOption) (*MdbKafkaTopic, error)
public MdbKafkaTopic(string name, MdbKafkaTopicArgs args, CustomResourceOptions? opts = null)
public MdbKafkaTopic(String name, MdbKafkaTopicArgs args)
public MdbKafkaTopic(String name, MdbKafkaTopicArgs args, CustomResourceOptions options)
type: yandex:MdbKafkaTopic
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args MdbKafkaTopicArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args MdbKafkaTopicArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args MdbKafkaTopicArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args MdbKafkaTopicArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args MdbKafkaTopicArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var mdbKafkaTopicResource = new Yandex.MdbKafkaTopic("mdbKafkaTopicResource", new()
{
    ClusterId = "string",
    Partitions = 0,
    ReplicationFactor = 0,
    Name = "string",
    TopicConfig = new Yandex.Inputs.MdbKafkaTopicTopicConfigArgs
    {
        CleanupPolicy = "string",
        CompressionType = "string",
        DeleteRetentionMs = "string",
        FileDeleteDelayMs = "string",
        FlushMessages = "string",
        FlushMs = "string",
        MaxMessageBytes = "string",
        MinCompactionLagMs = "string",
        MinInsyncReplicas = "string",
        Preallocate = false,
        RetentionBytes = "string",
        RetentionMs = "string",
        SegmentBytes = "string",
    },
});
example, err := yandex.NewMdbKafkaTopic(ctx, "mdbKafkaTopicResource", &yandex.MdbKafkaTopicArgs{
	ClusterId:         pulumi.String("string"),
	Partitions:        pulumi.Int(0),
	ReplicationFactor: pulumi.Int(0),
	Name:              pulumi.String("string"),
	TopicConfig: &yandex.MdbKafkaTopicTopicConfigArgs{
		CleanupPolicy:      pulumi.String("string"),
		CompressionType:    pulumi.String("string"),
		DeleteRetentionMs:  pulumi.String("string"),
		FileDeleteDelayMs:  pulumi.String("string"),
		FlushMessages:      pulumi.String("string"),
		FlushMs:            pulumi.String("string"),
		MaxMessageBytes:    pulumi.String("string"),
		MinCompactionLagMs: pulumi.String("string"),
		MinInsyncReplicas:  pulumi.String("string"),
		Preallocate:        pulumi.Bool(false),
		RetentionBytes:     pulumi.String("string"),
		RetentionMs:        pulumi.String("string"),
		SegmentBytes:       pulumi.String("string"),
	},
})
var mdbKafkaTopicResource = new MdbKafkaTopic("mdbKafkaTopicResource", MdbKafkaTopicArgs.builder()
    .clusterId("string")
    .partitions(0)
    .replicationFactor(0)
    .name("string")
    .topicConfig(MdbKafkaTopicTopicConfigArgs.builder()
        .cleanupPolicy("string")
        .compressionType("string")
        .deleteRetentionMs("string")
        .fileDeleteDelayMs("string")
        .flushMessages("string")
        .flushMs("string")
        .maxMessageBytes("string")
        .minCompactionLagMs("string")
        .minInsyncReplicas("string")
        .preallocate(false)
        .retentionBytes("string")
        .retentionMs("string")
        .segmentBytes("string")
        .build())
    .build());
mdb_kafka_topic_resource = yandex.MdbKafkaTopic("mdbKafkaTopicResource",
    cluster_id="string",
    partitions=0,
    replication_factor=0,
    name="string",
    topic_config={
        "cleanup_policy": "string",
        "compression_type": "string",
        "delete_retention_ms": "string",
        "file_delete_delay_ms": "string",
        "flush_messages": "string",
        "flush_ms": "string",
        "max_message_bytes": "string",
        "min_compaction_lag_ms": "string",
        "min_insync_replicas": "string",
        "preallocate": False,
        "retention_bytes": "string",
        "retention_ms": "string",
        "segment_bytes": "string",
    })
const mdbKafkaTopicResource = new yandex.MdbKafkaTopic("mdbKafkaTopicResource", {
    clusterId: "string",
    partitions: 0,
    replicationFactor: 0,
    name: "string",
    topicConfig: {
        cleanupPolicy: "string",
        compressionType: "string",
        deleteRetentionMs: "string",
        fileDeleteDelayMs: "string",
        flushMessages: "string",
        flushMs: "string",
        maxMessageBytes: "string",
        minCompactionLagMs: "string",
        minInsyncReplicas: "string",
        preallocate: false,
        retentionBytes: "string",
        retentionMs: "string",
        segmentBytes: "string",
    },
});
type: yandex:MdbKafkaTopic
properties:
    clusterId: string
    name: string
    partitions: 0
    replicationFactor: 0
    topicConfig:
        cleanupPolicy: string
        compressionType: string
        deleteRetentionMs: string
        fileDeleteDelayMs: string
        flushMessages: string
        flushMs: string
        maxMessageBytes: string
        minCompactionLagMs: string
        minInsyncReplicas: string
        preallocate: false
        retentionBytes: string
        retentionMs: string
        segmentBytes: string
MdbKafkaTopic Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
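For example, the following Python sketch declares the same topic twice, once with the generated args class and once with a dictionary literal. Here foo is assumed to be the cluster from the Example Usage section, and only a couple of topic_config settings are shown:
import pulumi_yandex as yandex

# Option 1: the generated args class.
topic_via_args = yandex.MdbKafkaTopic("events-args",
    cluster_id=foo.id,
    partitions=4,
    replication_factor=1,
    topic_config=yandex.MdbKafkaTopicTopicConfigArgs(
        cleanup_policy="CLEANUP_POLICY_COMPACT",
        retention_ms="604800000",
    ))

# Option 2: an equivalent plain dictionary literal.
topic_via_dict = yandex.MdbKafkaTopic("events-dict",
    cluster_id=foo.id,
    partitions=4,
    replication_factor=1,
    topic_config={
        "cleanup_policy": "CLEANUP_POLICY_COMPACT",
        "retention_ms": "604800000",
    })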
The MdbKafkaTopic resource accepts the following input properties:
- ClusterId string
- Partitions int
- The number of the topic's partitions.
- ReplicationFactor int
- Amount of data copies (replicas) for the topic in the cluster.
- Name string
- The name of the topic.
- TopicConfig MdbKafkaTopicTopicConfig
- User-defined settings for the topic. The structure is documented below.
- ClusterId string
- Partitions int
- The number of the topic's partitions.
- ReplicationFactor int
- Amount of data copies (replicas) for the topic in the cluster.
- Name string
- The name of the topic.
- TopicConfig MdbKafkaTopicTopicConfigArgs
- User-defined settings for the topic. The structure is documented below.
- clusterId String
- partitions Integer
- The number of the topic's partitions.
- replicationFactor Integer
- Amount of data copies (replicas) for the topic in the cluster.
- name String
- The name of the topic.
- topicConfig MdbKafkaTopicTopicConfig
- User-defined settings for the topic. The structure is documented below.
- clusterId string
- partitions number
- The number of the topic's partitions.
- replicationFactor number
- Amount of data copies (replicas) for the topic in the cluster.
- name string
- The name of the topic.
- topicConfig MdbKafkaTopicTopicConfig
- User-defined settings for the topic. The structure is documented below.
- cluster_id str
- partitions int
- The number of the topic's partitions.
- replication_factor int
- Amount of data copies (replicas) for the topic in the cluster.
- name str
- The name of the topic.
- topic_config MdbKafkaTopicTopicConfigArgs
- User-defined settings for the topic. The structure is documented below.
- clusterId String
- partitions Number
- The number of the topic's partitions.
- replicationFactor Number
- Amount of data copies (replicas) for the topic in the cluster.
- name String
- The name of the topic.
- topicConfig Property Map
- User-defined settings for the topic. The structure is documented below.
Outputs
All input properties are implicitly available as output properties. Additionally, the MdbKafkaTopic resource produces the following output properties:
- Id string
- The provider-assigned unique ID for this managed resource.
- Id string
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
- id string
- The provider-assigned unique ID for this managed resource.
- id str
- The provider-assigned unique ID for this managed resource.
- id String
- The provider-assigned unique ID for this managed resource.
Look up Existing MdbKafkaTopic Resource
Get an existing MdbKafkaTopic resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: MdbKafkaTopicState, opts?: CustomResourceOptions): MdbKafkaTopic
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        cluster_id: Optional[str] = None,
        name: Optional[str] = None,
        partitions: Optional[int] = None,
        replication_factor: Optional[int] = None,
        topic_config: Optional[MdbKafkaTopicTopicConfigArgs] = None) -> MdbKafkaTopic
func GetMdbKafkaTopic(ctx *Context, name string, id IDInput, state *MdbKafkaTopicState, opts ...ResourceOption) (*MdbKafkaTopic, error)
public static MdbKafkaTopic Get(string name, Input<string> id, MdbKafkaTopicState? state, CustomResourceOptions? opts = null)
public static MdbKafkaTopic get(String name, Output<String> id, MdbKafkaTopicState state, CustomResourceOptions options)
resources:
  _:
    type: yandex:MdbKafkaTopic
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- ClusterId string
- Name string
- The name of the topic.
- Partitions int
- The number of the topic's partitions.
- ReplicationFactor int
- Amount of data copies (replicas) for the topic in the cluster.
- TopicConfig MdbKafkaTopicTopicConfig
- User-defined settings for the topic. The structure is documented below.
- ClusterId string
- Name string
- The name of the topic.
- Partitions int
- The number of the topic's partitions.
- ReplicationFactor int
- Amount of data copies (replicas) for the topic in the cluster.
- TopicConfig MdbKafkaTopicTopicConfigArgs
- User-defined settings for the topic. The structure is documented below.
- clusterId String
- name String
- The name of the topic.
- partitions Integer
- The number of the topic's partitions.
- replicationFactor Integer
- Amount of data copies (replicas) for the topic in the cluster.
- topicConfig MdbKafkaTopicTopicConfig
- User-defined settings for the topic. The structure is documented below.
- clusterId string
- name string
- The name of the topic.
- partitions number
- The number of the topic's partitions.
- replicationFactor number
- Amount of data copies (replicas) for the topic in the cluster.
- topicConfig MdbKafkaTopicTopicConfig
- User-defined settings for the topic. The structure is documented below.
- cluster_id str
- name str
- The name of the topic.
- partitions int
- The number of the topic's partitions.
- replication_factor int
- Amount of data copies (replicas) for the topic in the cluster.
- topic_config MdbKafkaTopicTopicConfigArgs
- User-defined settings for the topic. The structure is documented below.
- clusterId String
- name String
- The name of the topic.
- partitions Number
- The number of the topic's partitions.
- replicationFactor Number
- Amount of data copies (replicas) for the topic in the cluster.
- topicConfig Property Map
- User-defined settings for the topic. The structure is documented below.
Supporting Types
MdbKafkaTopicTopicConfig, MdbKafkaTopicTopicConfigArgs
- CleanupPolicy string
- CompressionType string
- DeleteRetentionMs string
- FileDeleteDelayMs string
- FlushMessages string
- FlushMs string
- MaxMessageBytes string
- MinCompactionLagMs string
- MinInsyncReplicas string
- Preallocate bool
- RetentionBytes string
- RetentionMs string
- SegmentBytes string
- CleanupPolicy string
- CompressionType string
- DeleteRetentionMs string
- FileDeleteDelayMs string
- FlushMessages string
- FlushMs string
- MaxMessageBytes string
- MinCompactionLagMs string
- MinInsyncReplicas string
- Preallocate bool
- RetentionBytes string
- RetentionMs string
- SegmentBytes string
- cleanupPolicy String
- compressionType String
- deleteRetentionMs String
- fileDeleteDelayMs String
- flushMessages String
- flushMs String
- maxMessageBytes String
- minCompactionLagMs String
- minInsyncReplicas String
- preallocate Boolean
- retentionBytes String
- retentionMs String
- segmentBytes String
- cleanupPolicy string
- compressionType string
- deleteRetentionMs string
- fileDeleteDelayMs string
- flushMessages string
- flushMs string
- maxMessageBytes string
- minCompactionLagMs string
- minInsyncReplicas string
- preallocate boolean
- retentionBytes string
- retentionMs string
- segmentBytes string
- cleanup_policy str
- compression_type str
- delete_retention_ms str
- file_delete_delay_ms str
- flush_messages str
- flush_ms str
- max_message_bytes str
- min_compaction_lag_ms str
- min_insync_replicas str
- preallocate bool
- retention_bytes str
- retention_ms str
- segment_bytes str
- cleanupPolicy String
- compressionType String
- deleteRetentionMs String
- fileDeleteDelayMs String
- flushMessages String
- flushMs String
- maxMessageBytes String
- minCompactionLagMs String
- minInsyncReplicas String
- preallocate Boolean
- retentionBytes String
- retentionMs String
- segmentBytes String
Import
A Kafka topic can be imported using the following format:
 $ pulumi import yandex:index/mdbKafkaTopic:MdbKafkaTopic foo {{cluster_id}}:{{topic_name}}
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Yandex pulumi/pulumi-yandex
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the yandex Terraform Provider.