@@ -41,6 +41,7 @@ _kafka_acls_args+=('--topic: topic to which ACLs should be added or removed. A v
 _kafka_acls_args+=('--transactional-id: The transactionalId to which ACLs should be added or removed. A value of * indicates the ACLs should apply to all transactionalIds.')
 _kafka_acls_args+=('--version: Display Kafka version.')
 compdef "_kafka-command kafka-acls" kafka-acls
+compdef "_kafka-command kafka-acls" kafka-acls.sh
 declare -a _kafka_avro_console_consumer_args
 _kafka_avro_console_consumer_args=()
 _kafka_avro_console_consumer_args+=('--bootstrap-server: REQUIRED: The server(s) to connect to.')
@@ -64,6 +65,7 @@ _kafka_avro_console_consumer_args+=('--value-deserializer:')
 _kafka_avro_console_consumer_args+=('--version: Display Kafka version.')
 _kafka_avro_console_consumer_args+=('--whitelist: Regular expression specifying whitelist of topics to include for consumption.')
 compdef "_kafka-command kafka-avro-console-consumer" kafka-avro-console-consumer
+compdef "_kafka-command kafka-avro-console-consumer" kafka-avro-console-consumer.sh
 declare -a _kafka_avro_console_producer_args
 _kafka_avro_console_producer_args=()
 _kafka_avro_console_producer_args+=('--batch-size: Number of messages to send in a single batch if they are not being sent synchronously. (default: 200)')
@@ -88,13 +90,15 @@ _kafka_avro_console_producer_args+=('--timeout: If set and the producer is runni
 _kafka_avro_console_producer_args+=('--topic: REQUIRED: The topic id to produce messages to.')
 _kafka_avro_console_producer_args+=('--version: Display Kafka version.')
 compdef "_kafka-command kafka-avro-console-producer" kafka-avro-console-producer
+compdef "_kafka-command kafka-avro-console-producer" kafka-avro-console-producer.sh
 declare -a _kafka_broker_api_versions_args
 _kafka_broker_api_versions_args=()
 _kafka_broker_api_versions_args+=('--bootstrap-server: REQUIRED: The server to connect to.')
 _kafka_broker_api_versions_args+=('--command-config: A property file containing configs to be passed to Admin Client.')
 _kafka_broker_api_versions_args+=('--help: Print usage information.')
 _kafka_broker_api_versions_args+=('--version: Display Kafka version.')
 compdef "_kafka-command kafka-broker-api-versions" kafka-broker-api-versions
+compdef "_kafka-command kafka-broker-api-versions" kafka-broker-api-versions.sh
 declare -a _kafka_configs_args
 _kafka_configs_args=()
 _kafka_configs_args+=('--add-config: Key Value pairs of configs to add. Square brackets can be used to group values which contain commas: ''k1=v1, k2=[v1,v2,v2],k3=v3''. The following is a list of valid configurations: For entity-type ''topics'': cleanup.policy compression.type confluent.append.record.interceptor. classes confluent.key.schema.validation confluent.placement.constraints confluent.tier.enable confluent.tier.local.hotset.bytes confluent.tier.local.hotset.ms confluent.value.schema.validation delete.retention.ms file.delete.delay.ms flush.messages flush.ms follower.replication.throttled. replicas index.interval.bytes leader.replication.throttled.replicas max.compaction.lag.ms max.message.bytes message.downconversion.enable message.format.version message.timestamp.difference.max.ms message.timestamp.type min.cleanable.dirty.ratio min.compaction.lag.ms min.insync.replicas preallocate retention.bytes retention.ms segment.bytes segment.index.bytes segment.jitter.ms segment.ms unclean.leader.election.enable For entity-type ''brokers'': advertised.listeners background.threads compression.type confluent.append.record.interceptor. classes confluent.tier.enable confluent.tier.local.hotset.bytes confluent.tier.local.hotset.ms follower.replication.throttled.rate leader.replication.throttled.rate listener.security.protocol.map listeners log.cleaner.backoff.ms log.cleaner.dedupe.buffer.size log.cleaner.delete.retention.ms log.cleaner.io.buffer.load.factor log.cleaner.io.buffer.size log.cleaner.io.max.bytes.per.second log.cleaner.max.compaction.lag.ms log.cleaner.min.cleanable.ratio log.cleaner.min.compaction.lag.ms log.cleaner.threads log.cleanup.policy log.flush.interval.messages log.flush.interval.ms log.index.interval.bytes log.index.size.max.bytes log.message.downconversion.enable log.message.timestamp.difference.max. ms log.message.timestamp.type log.preallocate log.retention.bytes log.retention.ms log.roll.jitter.ms log.roll.ms log.segment.bytes log.segment.delete.delay.ms max.connections max.connections.per.ip max.connections.per.ip.overrides message.max.bytes metric.reporters min.insync.replicas num.io.threads num.network.threads num.recovery.threads.per.data.dir num.replica.fetchers principal.builder.class replica.alter.log.dirs.io.max.bytes. per.second sasl.enabled.mechanisms sasl.jaas.config sasl.kerberos.kinit.cmd sasl.kerberos.min.time.before.relogin sasl.kerberos.principal.to.local.rules sasl.kerberos.service.name sasl.kerberos.ticket.renew.jitter sasl.kerberos.ticket.renew.window. factor sasl.login.refresh.buffer.seconds sasl.login.refresh.min.period.seconds sasl.login.refresh.window.factor sasl.login.refresh.window.jitter sasl.mechanism.inter.broker.protocol ssl.cipher.suites ssl.client.auth ssl.enabled.protocols ssl.endpoint.identification.algorithm ssl.key.password ssl.keymanager.algorithm ssl.keystore.location ssl.keystore.password ssl.keystore.type ssl.protocol ssl.provider ssl.secure.random.implementation ssl.trustmanager.algorithm ssl.truststore.location ssl.truststore.password ssl.truststore.type unclean.leader.election.enable For entity-type ''users'': SCRAM-SHA-256 SCRAM-SHA-512 consumer_byte_rate producer_byte_rate request_percentage For entity-type ''clients'': consumer_byte_rate producer_byte_rate request_percentage Entity types ''users'' and ''clients'' may be specified together to update config for clients of a specific user.')
@@ -112,6 +116,7 @@ _kafka_configs_args+=('--replica-placement: This configuration is a JSON object
 _kafka_configs_args+=('--version: Display Kafka version.')
 _kafka_configs_args+=('--zookeeper: REQUIRED: The connection string for the zookeeper connection in the form host:port. Multiple URLS can be given to allow fail-over.')
 compdef "_kafka-command kafka-configs" kafka-configs
+compdef "_kafka-command kafka-configs" kafka-configs.sh
 declare -a _kafka_console_consumer_args
 _kafka_console_consumer_args=()
 _kafka_console_consumer_args+=('--bootstrap-server: REQUIRED: The server(s) to connect to.')
@@ -135,6 +140,7 @@ _kafka_console_consumer_args+=('--value-deserializer:')
 _kafka_console_consumer_args+=('--version: Display Kafka version.')
 _kafka_console_consumer_args+=('--whitelist: Regular expression specifying whitelist of topics to include for consumption.')
 compdef "_kafka-command kafka-console-consumer" kafka-console-consumer
+compdef "_kafka-command kafka-console-consumer" kafka-console-consumer.sh
 declare -a _kafka_console_producer_args
 _kafka_console_producer_args=()
 _kafka_console_producer_args+=('--batch-size: Number of messages to send in a single batch if they are not being sent synchronously. (default: 200)')
@@ -159,6 +165,7 @@ _kafka_console_producer_args+=('--timeout: If set and the producer is running in
 _kafka_console_producer_args+=('--topic: REQUIRED: The topic id to produce messages to.')
 _kafka_console_producer_args+=('--version: Display Kafka version.')
 compdef "_kafka-command kafka-console-producer" kafka-console-producer
+compdef "_kafka-command kafka-console-producer" kafka-console-producer.sh
 declare -a _kafka_consumer_groups_args
 _kafka_consumer_groups_args=()
 _kafka_consumer_groups_args+=('--all-groups: Apply to all consumer groups.')
@@ -191,6 +198,7 @@ _kafka_consumer_groups_args+=('--topic: The topic whose consumer group informati
 _kafka_consumer_groups_args+=('--verbose: Provide additional information, if any, when describing the group. This option may be used with ''-- offsets''/''--members''/''--state'' and ''--bootstrap-server'' options only. Example: --bootstrap-server localhost: 9092 --describe --group group1 -- members --verbose')
 _kafka_consumer_groups_args+=('--version: Display Kafka version.')
 compdef "_kafka-command kafka-consumer-groups" kafka-consumer-groups
+compdef "_kafka-command kafka-consumer-groups" kafka-consumer-groups.sh
 declare -a _kafka_consumer_perf_test_args
 _kafka_consumer_perf_test_args=()
 _kafka_consumer_perf_test_args+=('--broker-list: REQUIRED: The server(s) to connect to.')
@@ -212,6 +220,7 @@ _kafka_consumer_perf_test_args+=('--timeout: The maximum allowed time in millise
 _kafka_consumer_perf_test_args+=('--topic: REQUIRED: The topic to consume from.')
 _kafka_consumer_perf_test_args+=('--version: Display Kafka version.')
 compdef "_kafka-command kafka-consumer-perf-test" kafka-consumer-perf-test
+compdef "_kafka-command kafka-consumer-perf-test" kafka-consumer-perf-test.sh
 declare -a _kafka_delegation_tokens_args
 _kafka_delegation_tokens_args=()
 _kafka_delegation_tokens_args+=('--bootstrap-server: REQUIRED: server(s) to use for bootstrapping.')
@@ -229,6 +238,7 @@ _kafka_delegation_tokens_args+=('--renew-time-period: Renew time period in milli
 _kafka_delegation_tokens_args+=('--renewer-principal: renewer is a kafka principal. It is should be in principalType:name format.')
 _kafka_delegation_tokens_args+=('--version: Display Kafka version.')
 compdef "_kafka-command kafka-delegation-tokens" kafka-delegation-tokens
+compdef "_kafka-command kafka-delegation-tokens" kafka-delegation-tokens.sh
 declare -a _kafka_topics_args
 _kafka_topics_args=()
 _kafka_topics_args+=('--alter: Alter the number of partitions, replica assignment, and/or configuration for the topic.')
@@ -259,9 +269,11 @@ _kafka_topics_args+=('--under-replicated-partitions: if set when describing topi
 _kafka_topics_args+=('--version: Display Kafka version.')
 _kafka_topics_args+=('--zookeeper: DEPRECATED, The connection string for the zookeeper connection in the form host:port. Multiple hosts can be given to allow fail-over.')
 compdef "_kafka-command kafka-topics" kafka-topics
+compdef "_kafka-command kafka-topics" kafka-topics.sh
 declare -a _kafka_producer_perf_test_args
 _kafka_producer_perf_test_args=()
 compdef "_kafka-command kafka-producer-perf-test" kafka-producer-perf-test
+compdef "_kafka-command kafka-producer-perf-test" kafka-producer-perf-test.sh
 declare -a _kafka_dump_log_args
 _kafka_dump_log_args=()
 _kafka_dump_log_args+=('--deep-iteration: if set, uses deep instead of shallow iteration. Automatically set if print- data-log is enabled.')
@@ -277,6 +289,7 @@ _kafka_dump_log_args+=('--value-decoder-class: if set, used to deserialize the m
 _kafka_dump_log_args+=('--verify-index-only: if set, just verify the index log without printing its content.')
 _kafka_dump_log_args+=('--version: Display Kafka version.')
 compdef "_kafka-command kafka-dump-log" kafka-dump-log
+compdef "_kafka-command kafka-dump-log" kafka-dump-log.sh
 declare -a _kafka_log_dirs_args
 _kafka_log_dirs_args=()
 _kafka_log_dirs_args+=('--bootstrap-server: REQUIRED: the server(s) to use for bootstrapping')
@@ -287,12 +300,15 @@ _kafka_log_dirs_args+=('--help: Print usage information.')
 _kafka_log_dirs_args+=('--topic-list: The list of topics to be queried in the form "topic1,topic2,topic3". All topics will be queried if no topic list is specified (default: )')
 _kafka_log_dirs_args+=('--version: Display Kafka version.')
 compdef "_kafka-command kafka-log-dirs" kafka-log-dirs
+compdef "_kafka-command kafka-log-dirs" kafka-log-dirs.sh
 declare -a _kafka_verifiable_consumer_args
 _kafka_verifiable_consumer_args=()
 compdef "_kafka-command kafka-verifiable-consumer" kafka-verifiable-consumer
+compdef "_kafka-command kafka-verifiable-consumer" kafka-verifiable-consumer.sh
 declare -a _kafka_verifiable_producer_args
 _kafka_verifiable_producer_args=()
 compdef "_kafka-command kafka-verifiable-producer" kafka-verifiable-producer
+compdef "_kafka-command kafka-verifiable-producer" kafka-verifiable-producer.sh
 declare -a _kafka_streams_application_reset_args
 _kafka_streams_application_reset_args=()
 _kafka_streams_application_reset_args+=('--bootstrap-servers: Comma-separated list of broker urls with format: HOST1:PORT1,HOST2:PORT2 (default: localhost:9092)')
@@ -312,6 +328,7 @@ _kafka_streams_application_reset_args+=('--to-offset: Reset offsets to a specifi
 _kafka_streams_application_reset_args+=('--version: Print version information and exit.')
 _kafka_streams_application_reset_args+=('--zookeeper: Zookeeper option is deprecated by bootstrap.servers, as the reset tool would no longer access Zookeeper directly.')
 compdef "_kafka-command kafka-streams-application-reset" kafka-streams-application-reset
+compdef "_kafka-command kafka-streams-application-reset" kafka-streams-application-reset.sh
 declare -a _kafka_mirror_maker_args
 _kafka_mirror_maker_args=()
 _kafka_mirror_maker_args+=('--abort.on.send.failure: Configure the mirror maker to exit on a failed send. (default: true)')
@@ -328,6 +345,7 @@ _kafka_mirror_maker_args+=('--rebalance.listener.args: Arguments used by custom
 _kafka_mirror_maker_args+=('--version: Display Kafka version.')
 _kafka_mirror_maker_args+=('--whitelist: Whitelist of topics to mirror.')
 compdef "_kafka-command kafka-mirror-maker" kafka-mirror-maker
+compdef "_kafka-command kafka-mirror-maker" kafka-mirror-maker.sh
 declare -a _kafka_delete_records_args
 _kafka_delete_records_args=()
 _kafka_delete_records_args+=('--bootstrap-server: REQUIRED: The server to connect to.')
@@ -336,9 +354,11 @@ _kafka_delete_records_args+=('--help: Print usage information.')
 _kafka_delete_records_args+=('--offset-json-file: REQUIRED: The JSON file with offset per partition. The format to use is: {"partitions": [{"topic": "foo", "partition": 1, "offset": 1}], "version":1 }')
 _kafka_delete_records_args+=('--version: Display Kafka version.')
 compdef "_kafka-command kafka-delete-records" kafka-delete-records
+compdef "_kafka-command kafka-delete-records" kafka-delete-records.sh
 declare -a _replicator_args
 _replicator_args=()
 compdef "_kafka-command replicator" replicator
+compdef "_kafka-command replicator" replicator.sh
 declare -a _kafka_reassign_partitions_args
 _kafka_reassign_partitions_args=()
 _kafka_reassign_partitions_args+=('--bootstrap-server: the server(s) to use for bootstrapping. REQUIRED if an absolute path of the log directory is specified for any replica in the reassignment json file')
@@ -357,3 +377,4 @@ _kafka_reassign_partitions_args+=('--verify: Verify if the reassignment complete
 _kafka_reassign_partitions_args+=('--version: Display Kafka version.')
 _kafka_reassign_partitions_args+=('--zookeeper: REQUIRED: The connection string for the zookeeper connection in the form host:port. Multiple URLS can be given to allow fail-over.')
 compdef "_kafka-command kafka-reassign-partitions" kafka-reassign-partitions
+compdef "_kafka-command kafka-reassign-partitions" kafka-reassign-partitions.sh
0 commit comments