# --- Topic management (kafka-topics.sh) ---
# Create a topic with 3 partitions, each replicated across 2 brokers.
kafka-topics.sh --create --bootstrap-server localhost:9092 --topic my-topic --partitions 3 --replication-factor 2
# Delete a topic.
kafka-topics.sh --delete --bootstrap-server localhost:9092 --topic my-topic
# List all topic names in the cluster.
kafka-topics.sh --list --bootstrap-server localhost:9092
# Show partitions, leaders, replicas, and ISR for one topic.
kafka-topics.sh --describe --bootstrap-server localhost:9092 --topic my-topic
# Health check: list only partitions whose ISR is smaller than the replica set.
kafka-topics.sh --describe --bootstrap-server localhost:9092 --under-replicated-partitions
# Grow the topic to 6 partitions (partition count can only increase).
kafka-topics.sh --alter --bootstrap-server localhost:9092 --topic my-topic --partitions 6
# --- Producing (kafka-console-producer.sh) ---
# Interactive producer: each stdin line becomes one message value.
kafka-console-producer.sh --bootstrap-server localhost:9092 --topic my-topic
# Produce keyed messages: each input line is split into key and value at ':'.
kafka-console-producer.sh --bootstrap-server localhost:9092 --topic my-topic --property parse.key=true --property key.separator=:
# --- Consuming (kafka-console-consumer.sh) ---
# Read the whole topic from the earliest available offset.
kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic my-topic --from-beginning
# Consume as part of a consumer group; stop after 100 messages.
kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic my-topic --group my-group --max-messages 100
# Read a single partition starting at a specific offset.
kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic my-topic --partition 0 --offset 42
# Print each record's key and timestamp alongside the value.
kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic my-topic --property print.key=true --property print.timestamp=true
# --- Consumer groups (kafka-consumer-groups.sh) ---
# List all consumer group ids.
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --list
# Show per-partition committed offsets and lag for one group.
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group my-group
# Describe every consumer group in the cluster.
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --all-groups
# Rewind the group to the earliest offsets on my-topic (--execute applies it).
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --group my-group --reset-offsets --to-earliest --topic my-topic --execute
# Reset offsets to the first message at or after the given timestamp.
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --group my-group --reset-offsets --to-datetime 2024-01-01T00:00:00.000 --topic my-topic --execute
# Shift offsets back by 100 messages on every topic the group reads.
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --group my-group --reset-offsets --shift-by -100 --all-topics --execute
# Delete a consumer group and its committed offsets.
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --delete --group my-group
# --- Configuration (kafka-configs.sh) ---
# Set a per-topic override: retain data for 24h (86400000 ms).
kafka-configs.sh --bootstrap-server localhost:9092 --entity-type topics --entity-name my-topic --alter --add-config retention.ms=86400000
# --- Cluster inspection ---
# Show which protocol API versions each broker supports.
kafka-broker-api-versions.sh --bootstrap-server localhost:9092
# Report log-directory disk usage for the given topic(s).
kafka-log-dirs.sh --bootstrap-server localhost:9092 --topic-list my-topic
# Print the current offsets for each partition of a topic.
kafka-get-offsets.sh --bootstrap-server localhost:9092 --topic my-topic
# --- Performance testing ---
# Produce 1M records of 1000 bytes each; --throughput -1 means unthrottled.
kafka-producer-perf-test.sh --topic perf-test --num-records 1000000 --record-size 1000 --throughput -1 --producer-props bootstrap.servers=localhost:9092
# Benchmark consuming 1M messages from the same topic.
kafka-consumer-perf-test.sh --bootstrap-server localhost:9092 --topic perf-test --messages 1000000
# --- KRaft mode ---
# Format storage dirs with a freshly generated cluster id.
# Command substitution is quoted so the id is passed as a single argument.
kafka-storage.sh format -t "$(kafka-storage.sh random-uuid)" -c config/kraft/server.properties
# Show KRaft metadata quorum status.
kafka-metadata-quorum.sh --bootstrap-server localhost:9092 --describe --status
## Delivery semantics

| Guarantee | Producer Config | Notes |
|---|---|---|
| At most once | acks=0, retries=0 | Messages may be lost, never duplicated |
| At least once | acks=all, retries=N | Messages may be duplicated, never lost |
| Exactly once (idempotence + transactions) | enable.idempotence=true + transaction API, transactional.id=X | Use read_committed on consumer |

## Common patterns

| Pattern | Key Config | Notes |
|---|---|---|
| High throughput | linger.ms=20, batch.size=65536, compression=lz4 | Trade latency for throughput |
| Low latency | linger.ms=0, acks=1 | Accept weaker guarantees |
| Dead letter queue | separate topic, catch exceptions | Route failed msgs to DLQ topic |
| Fan-out | multiple consumer groups | Each group reads all messages |
| Queue | 1 partition, 1 consumer group | FIFO processing |
| Log compaction | cleanup.policy=compact | Latest value per key retained |
| Event sourcing | retention.ms=-1, compact | Infinite retention + compaction |